2024-12-08 11:19:50,638 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-08 11:19:50,649 main DEBUG Took 0.009892 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-08 11:19:50,650 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-08 11:19:50,650 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-08 11:19:50,651 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-08 11:19:50,652 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,658 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-08 11:19:50,670 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,672 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,672 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,673 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,673 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,673 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,674 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,674 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,675 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,675 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,676 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,676 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,677 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,677 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-08 11:19:50,677 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,677 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,678 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,678 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,679 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,679 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,679 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,680 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,680 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-08 11:19:50,680 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,681 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-08 11:19:50,682 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-08 11:19:50,683 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-08 11:19:50,685 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-08 11:19:50,685 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-08 11:19:50,686 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-08 11:19:50,687 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-08 11:19:50,695 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-08 11:19:50,697 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-08 11:19:50,698 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-08 11:19:50,699 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-08 11:19:50,699 main DEBUG createAppenders(={Console}) 2024-12-08 11:19:50,700 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-08 11:19:50,700 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-08 11:19:50,700 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-08 11:19:50,701 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-08 11:19:50,701 main DEBUG OutputStream closed 2024-12-08 11:19:50,701 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-08 11:19:50,701 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-08 11:19:50,702 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-08 11:19:50,767 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-08 11:19:50,769 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-08 11:19:50,770 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-08 11:19:50,771 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-08 11:19:50,771 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-08 11:19:50,772 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-08 11:19:50,772 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-08 11:19:50,772 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-08 11:19:50,773 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-08 11:19:50,773 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-08 11:19:50,773 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-08 11:19:50,773 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-08 11:19:50,774 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-08 11:19:50,774 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-08 11:19:50,774 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-08 11:19:50,775 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-08 11:19:50,775 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-08 11:19:50,776 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-08 11:19:50,778 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-08 11:19:50,778 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-08 11:19:50,778 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-08 11:19:50,779 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-08T11:19:50,998 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae 2024-12-08 11:19:51,001 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-08 11:19:51,001 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-08T11:19:51,010 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-08T11:19:51,030 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-08T11:19:51,033 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9, deleteOnExit=true 2024-12-08T11:19:51,033 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-08T11:19:51,034 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/test.cache.data in system properties and HBase conf 2024-12-08T11:19:51,034 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/hadoop.tmp.dir in system properties and HBase conf 2024-12-08T11:19:51,035 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/hadoop.log.dir in system properties and HBase conf 2024-12-08T11:19:51,035 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-08T11:19:51,036 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-08T11:19:51,036 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-08T11:19:51,126 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-08T11:19:51,229 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-08T11:19:51,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-08T11:19:51,233 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-08T11:19:51,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-08T11:19:51,234 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T11:19:51,235 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-08T11:19:51,235 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-08T11:19:51,235 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-08T11:19:51,236 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T11:19:51,236 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-08T11:19:51,236 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/nfs.dump.dir in system properties and HBase conf 2024-12-08T11:19:51,237 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/java.io.tmpdir in system properties and HBase conf 2024-12-08T11:19:51,237 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-08T11:19:51,237 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-08T11:19:51,238 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-08T11:19:52,089 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-08T11:19:52,168 INFO [Time-limited test {}] log.Log(170): Logging initialized @2298ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-08T11:19:52,247 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T11:19:52,312 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T11:19:52,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T11:19:52,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T11:19:52,333 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-08T11:19:52,347 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T11:19:52,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/hadoop.log.dir/,AVAILABLE} 2024-12-08T11:19:52,350 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T11:19:52,547 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/java.io.tmpdir/jetty-localhost-37177-hadoop-hdfs-3_4_1-tests_jar-_-any-4985121843272685139/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T11:19:52,557 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:37177} 2024-12-08T11:19:52,558 INFO [Time-limited test {}] server.Server(415): Started @2689ms 2024-12-08T11:19:52,956 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-08T11:19:52,964 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-08T11:19:52,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-08T11:19:52,966 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-08T11:19:52,966 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-08T11:19:52,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/hadoop.log.dir/,AVAILABLE} 2024-12-08T11:19:52,968 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-08T11:19:53,095 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/java.io.tmpdir/jetty-localhost-39375-hadoop-hdfs-3_4_1-tests_jar-_-any-1917296516667715759/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T11:19:53,096 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:39375} 2024-12-08T11:19:53,096 INFO [Time-limited test {}] server.Server(415): Started @3227ms 2024-12-08T11:19:53,156 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-08T11:19:53,599 WARN [Thread-70 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9/dfs/data/data2/current/BP-1692739221-172.17.0.2-1733656791850/current, will proceed with Du for space computation calculation, 2024-12-08T11:19:53,599 WARN [Thread-69 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9/dfs/data/data1/current/BP-1692739221-172.17.0.2-1733656791850/current, will proceed with Du for space computation calculation, 2024-12-08T11:19:53,657 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-08T11:19:53,737 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfc3ee21d1a64339 with lease ID 0x2f13ac210a72d83: Processing first storage report for DS-00446ebe-3965-4a0f-bd76-838d1c916da5 from datanode DatanodeRegistration(127.0.0.1:42827, datanodeUuid=5d6ecd1b-00f9-49e2-800a-270a73fe2319, infoPort=33571, infoSecurePort=0, ipcPort=33953, storageInfo=lv=-57;cid=testClusterID;nsid=1271048654;c=1733656791850) 2024-12-08T11:19:53,739 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfc3ee21d1a64339 with lease ID 0x2f13ac210a72d83: from storage DS-00446ebe-3965-4a0f-bd76-838d1c916da5 node DatanodeRegistration(127.0.0.1:42827, datanodeUuid=5d6ecd1b-00f9-49e2-800a-270a73fe2319, infoPort=33571, infoSecurePort=0, ipcPort=33953, storageInfo=lv=-57;cid=testClusterID;nsid=1271048654;c=1733656791850), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T11:19:53,739 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfc3ee21d1a64339 with lease ID 0x2f13ac210a72d83: Processing first storage report for DS-ca89a847-938f-49ad-80c5-6dfc8f2041c5 from datanode DatanodeRegistration(127.0.0.1:42827, datanodeUuid=5d6ecd1b-00f9-49e2-800a-270a73fe2319, infoPort=33571, infoSecurePort=0, ipcPort=33953, storageInfo=lv=-57;cid=testClusterID;nsid=1271048654;c=1733656791850) 2024-12-08T11:19:53,740 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfc3ee21d1a64339 with lease ID 0x2f13ac210a72d83: from storage DS-ca89a847-938f-49ad-80c5-6dfc8f2041c5 node DatanodeRegistration(127.0.0.1:42827, datanodeUuid=5d6ecd1b-00f9-49e2-800a-270a73fe2319, infoPort=33571, infoSecurePort=0, ipcPort=33953, storageInfo=lv=-57;cid=testClusterID;nsid=1271048654;c=1733656791850), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-08T11:19:53,833 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae 
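Illustrative note (not part of the captured log): the startup traced above, beginning with StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1} and followed by the mini DFS and ZooKeeper bring-up, is what HBaseTestingUtility produces when a test requests a single-node mini cluster. A minimal sketch of that public test API under the branch-2 signatures implied by the log; the actual setup code of TestAcidGuaranteesWithAdaptivePolicy may differ.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Mirrors the logged option: 1 master, 1 region server, 1 data node, 1 ZK server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper, the HMaster and a region server
    try {
      // ... exercise the cluster through util.getConnection() ...
    } finally {
      util.shutdownMiniCluster();    // tears the mini cluster down and removes the test data dirs
    }
  }
}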
2024-12-08T11:19:53,917 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9/zookeeper_0, clientPort=63801, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-08T11:19:53,927 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=63801 2024-12-08T11:19:53,937 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T11:19:53,940 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T11:19:54,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741825_1001 (size=7) 2024-12-08T11:19:54,600 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c with version=8 2024-12-08T11:19:54,600 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/hbase-staging 2024-12-08T11:19:54,734 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-08T11:19:55,012 INFO [Time-limited test {}] client.ConnectionUtils(129): master/355ef6e50110:0 server-side Connection retries=45 2024-12-08T11:19:55,033 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T11:19:55,033 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T11:19:55,033 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T11:19:55,034 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T11:19:55,034 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T11:19:55,174 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T11:19:55,239 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-08T11:19:55,248 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-08T11:19:55,252 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T11:19:55,280 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 7561 (auto-detected) 2024-12-08T11:19:55,282 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-08T11:19:55,302 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:43409 2024-12-08T11:19:55,311 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T11:19:55,313 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T11:19:55,327 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:43409 connecting to ZooKeeper ensemble=127.0.0.1:63801 2024-12-08T11:19:55,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:434090x0, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T11:19:55,363 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:43409-0x100715b0c0a0000 connected 2024-12-08T11:19:55,396 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T11:19:55,399 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T11:19:55,402 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T11:19:55,406 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43409 2024-12-08T11:19:55,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43409 2024-12-08T11:19:55,407 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43409 2024-12-08T11:19:55,410 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43409 2024-12-08T11:19:55,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43409 
2024-12-08T11:19:55,420 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c, hbase.cluster.distributed=false 2024-12-08T11:19:55,492 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/355ef6e50110:0 server-side Connection retries=45 2024-12-08T11:19:55,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T11:19:55,492 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-08T11:19:55,493 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-08T11:19:55,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-08T11:19:55,493 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-08T11:19:55,495 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-08T11:19:55,497 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-08T11:19:55,498 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:46083 2024-12-08T11:19:55,500 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-08T11:19:55,505 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-08T11:19:55,506 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T11:19:55,509 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T11:19:55,512 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:46083 connecting to ZooKeeper ensemble=127.0.0.1:63801 2024-12-08T11:19:55,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:460830x0, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-08T11:19:55,517 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:460830x0, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T11:19:55,517 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46083-0x100715b0c0a0001 connected 2024-12-08T11:19:55,518 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T11:19:55,519 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-08T11:19:55,520 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46083 2024-12-08T11:19:55,521 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46083 2024-12-08T11:19:55,521 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46083 2024-12-08T11:19:55,526 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46083 2024-12-08T11:19:55,526 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46083 2024-12-08T11:19:55,528 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/355ef6e50110,43409,1733656794727 2024-12-08T11:19:55,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T11:19:55,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T11:19:55,538 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/355ef6e50110,43409,1733656794727 2024-12-08T11:19:55,545 DEBUG [M:0;355ef6e50110:43409 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;355ef6e50110:43409 2024-12-08T11:19:55,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T11:19:55,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-08T11:19:55,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:55,558 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:55,559 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T11:19:55,560 INFO [master/355ef6e50110:0:becomeActiveMaster {}] 
master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/355ef6e50110,43409,1733656794727 from backup master directory 2024-12-08T11:19:55,561 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-08T11:19:55,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T11:19:55,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/355ef6e50110,43409,1733656794727 2024-12-08T11:19:55,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-08T11:19:55,564 WARN [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-08T11:19:55,564 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=355ef6e50110,43409,1733656794727 2024-12-08T11:19:55,566 INFO [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-08T11:19:55,568 INFO [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-08T11:19:55,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741826_1002 (size=42) 2024-12-08T11:19:56,041 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/hbase.id with ID: 6f3b93c3-5558-4206-b47e-64d237bd32c3 2024-12-08T11:19:56,084 INFO [master/355ef6e50110:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-08T11:19:56,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:56,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:56,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741827_1003 (size=196) 2024-12-08T11:19:56,147 INFO [master/355ef6e50110:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:19:56,149 INFO [master/355ef6e50110:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-08T11:19:56,171 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:19:56,177 INFO [master/355ef6e50110:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T11:19:56,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741828_1004 (size=1189) 2024-12-08T11:19:56,638 INFO [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store 2024-12-08T11:19:56,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741829_1005 (size=34) 2024-12-08T11:19:57,059 INFO [master/355ef6e50110:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
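Illustrative note (not part of the captured log): the NoSuchMethodException above is expected, not a failure. FanOutOneBlockAsyncDFSOutputSaslHelper probes DFSClient by reflection to see whether the running Hadoop carries HDFS-12396, catches the miss, logs it at DEBUG, and falls back to the older crypto helper. A minimal sketch of that capability-probe pattern; the helper method name below is hypothetical, only the probed class and method come from the trace.

// Hypothetical stand-alone version of the reflective probe seen in the stack trace above.
static boolean hadoopHasHdfs12396() {
  try {
    Class<?> dfsClient = Class.forName("org.apache.hadoop.hdfs.DFSClient");
    Class<?> feInfo = Class.forName("org.apache.hadoop.fs.FileEncryptionInfo");
    dfsClient.getDeclaredMethod("decryptEncryptedDataEncryptionKey", feInfo);
    return true;                  // method present: Hadoop includes HDFS-12396
  } catch (ClassNotFoundException | NoSuchMethodException e) {
    return false;                 // method missing: use the pre-HDFS-12396 code path
  }
}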
2024-12-08T11:19:57,060 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:19:57,061 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T11:19:57,062 INFO [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T11:19:57,062 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T11:19:57,062 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T11:19:57,062 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T11:19:57,062 INFO [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T11:19:57,062 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-08T11:19:57,065 WARN [master/355ef6e50110:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/.initializing 2024-12-08T11:19:57,065 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/WALs/355ef6e50110,43409,1733656794727 2024-12-08T11:19:57,072 INFO [master/355ef6e50110:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T11:19:57,083 INFO [master/355ef6e50110:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=355ef6e50110%2C43409%2C1733656794727, suffix=, logDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/WALs/355ef6e50110,43409,1733656794727, archiveDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/oldWALs, maxLogs=10 2024-12-08T11:19:57,104 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/WALs/355ef6e50110,43409,1733656794727/355ef6e50110%2C43409%2C1733656794727.1733656797087, exclude list is [], retry=0 2024-12-08T11:19:57,121 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42827,DS-00446ebe-3965-4a0f-bd76-838d1c916da5,DISK] 2024-12-08T11:19:57,124 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
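Illustrative note (not part of the captured log): the WAL parameters printed above (blocksize=256 MB, rollsize=128 MB, maxLogs=10) are configuration-driven. A sketch of the keys I believe control them in HBase 2.x, with values consistent with the log (rollsize being the block size times the roll multiplier); treat the exact key names and defaults as assumptions to verify against the running version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfigSketch {
  public static Configuration walRollSettings() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed keys: WAL block size, roll threshold as a fraction of that size, retained WAL count.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // 256 MB, as logged
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // 256 MB * 0.5 = 128 MB rollsize
    conf.setInt("hbase.regionserver.maxlogs", 10);                         // maxLogs=10, as logged
    return conf;
  }
}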
2024-12-08T11:19:57,163 INFO [master/355ef6e50110:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/WALs/355ef6e50110,43409,1733656794727/355ef6e50110%2C43409%2C1733656794727.1733656797087 2024-12-08T11:19:57,164 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33571:33571)] 2024-12-08T11:19:57,165 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:19:57,165 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:19:57,168 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T11:19:57,169 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T11:19:57,208 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T11:19:57,233 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-08T11:19:57,237 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:57,240 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T11:19:57,241 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T11:19:57,245 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-08T11:19:57,245 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:57,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:19:57,246 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T11:19:57,250 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-08T11:19:57,250 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:57,251 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:19:57,251 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-08T11:19:57,254 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-08T11:19:57,254 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:57,255 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:19:57,259 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T11:19:57,260 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-08T11:19:57,269 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-08T11:19:57,273 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-08T11:19:57,277 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T11:19:57,278 INFO [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65779729, jitterRate=-0.019805654883384705}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-08T11:19:57,282 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-08T11:19:57,283 INFO [master/355ef6e50110:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-08T11:19:57,312 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d3485ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:19:57,348 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
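The FlushLargeStoresPolicy fallback recorded just above can be cross-checked against figures that appear elsewhere in these entries: with no hbase.hregion.percolumnfamilyflush.size.lower.bound set, the per-family lower bound falls back to the region memstore flush size divided by the number of column families. A minimal sketch of that arithmetic, using only values taken from this log (flushSize=134217728 from the MasterRegionFlusherAndCompactor line, and the four families info, proc, rs and state opened earlier):

    public class FlushLowerBoundCheck {
        public static void main(String[] args) {
            long regionFlushSize = 134217728L; // flushSize from the MasterRegionFlusherAndCompactor entry
            int columnFamilies = 4;            // info, proc, rs, state stores opened above
            long perFamilyLowerBound = regionFlushSize / columnFamilies;
            // Prints 33554432 (= 32 MB), matching "(32.0 M)" in the fallback message and
            // FlushLargeStoresPolicy{flushSizeLowerBound=33554432} reported when the region opens.
            System.out.println(perFamilyLowerBound);
        }
    }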
2024-12-08T11:19:57,360 INFO [master/355ef6e50110:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-08T11:19:57,360 INFO [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-08T11:19:57,363 INFO [master/355ef6e50110:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-08T11:19:57,364 INFO [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-08T11:19:57,369 INFO [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-12-08T11:19:57,370 INFO [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-08T11:19:57,395 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-08T11:19:57,407 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-08T11:19:57,409 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-08T11:19:57,412 INFO [master/355ef6e50110:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-08T11:19:57,413 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-08T11:19:57,415 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-08T11:19:57,416 INFO [master/355ef6e50110:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-08T11:19:57,420 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-08T11:19:57,422 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-08T11:19:57,423 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-08T11:19:57,425 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-08T11:19:57,434 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-08T11:19:57,436 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-08T11:19:57,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T11:19:57,440 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-08T11:19:57,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:57,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:57,441 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=355ef6e50110,43409,1733656794727, sessionid=0x100715b0c0a0000, setting cluster-up flag (Was=false) 2024-12-08T11:19:57,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:57,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:57,461 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-08T11:19:57,463 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=355ef6e50110,43409,1733656794727 2024-12-08T11:19:57,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:57,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:57,476 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-08T11:19:57,477 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=355ef6e50110,43409,1733656794727 2024-12-08T11:19:57,542 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;355ef6e50110:46083 2024-12-08T11:19:57,544 INFO 
[RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1008): ClusterId : 6f3b93c3-5558-4206-b47e-64d237bd32c3 2024-12-08T11:19:57,547 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-08T11:19:57,552 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-08T11:19:57,552 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-08T11:19:57,556 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-08T11:19:57,556 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-08T11:19:57,557 DEBUG [RS:0;355ef6e50110:46083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37b4a1f2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:19:57,559 DEBUG [RS:0;355ef6e50110:46083 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12ac06f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=355ef6e50110/172.17.0.2:0 2024-12-08T11:19:57,562 INFO [master/355ef6e50110:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-08T11:19:57,562 INFO [RS:0;355ef6e50110:46083 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-08T11:19:57,562 INFO [RS:0;355ef6e50110:46083 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-08T11:19:57,562 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-08T11:19:57,565 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(3073): reportForDuty to master=355ef6e50110,43409,1733656794727 with isa=355ef6e50110/172.17.0.2:46083, startcode=1733656795491 2024-12-08T11:19:57,565 INFO [master/355ef6e50110:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-08T11:19:57,572 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 355ef6e50110,43409,1733656794727 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-08T11:19:57,577 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/355ef6e50110:0, corePoolSize=5, maxPoolSize=5 2024-12-08T11:19:57,577 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/355ef6e50110:0, corePoolSize=5, maxPoolSize=5 2024-12-08T11:19:57,577 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/355ef6e50110:0, corePoolSize=5, maxPoolSize=5 2024-12-08T11:19:57,578 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/355ef6e50110:0, corePoolSize=5, maxPoolSize=5 2024-12-08T11:19:57,578 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/355ef6e50110:0, corePoolSize=10, maxPoolSize=10 2024-12-08T11:19:57,578 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,578 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/355ef6e50110:0, corePoolSize=2, maxPoolSize=2 2024-12-08T11:19:57,579 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,581 DEBUG [RS:0;355ef6e50110:46083 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T11:19:57,582 INFO [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733656827582 2024-12-08T11:19:57,583 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-08T11:19:57,585 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-08T11:19:57,585 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-08T11:19:57,585 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-08T11:19:57,588 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-08T11:19:57,589 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-08T11:19:57,589 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-08T11:19:57,589 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-08T11:19:57,590 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,591 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:57,591 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T11:19:57,591 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-08T11:19:57,593 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-08T11:19:57,593 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-08T11:19:57,596 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-08T11:19:57,596 INFO [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-08T11:19:57,599 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/355ef6e50110:0:becomeActiveMaster-HFileCleaner.large.0-1733656797598,5,FailOnTimeoutGroup] 2024-12-08T11:19:57,600 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/355ef6e50110:0:becomeActiveMaster-HFileCleaner.small.0-1733656797599,5,FailOnTimeoutGroup] 2024-12-08T11:19:57,600 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,600 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-08T11:19:57,602 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,602 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741831_1007 (size=1039) 2024-12-08T11:19:57,607 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-08T11:19:57,608 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c 2024-12-08T11:19:57,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741832_1008 (size=32) 2024-12-08T11:19:57,624 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:19:57,627 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T11:19:57,628 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56463, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), 
service=RegionServerStatusService 2024-12-08T11:19:57,630 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T11:19:57,631 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:57,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T11:19:57,632 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T11:19:57,635 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T11:19:57,635 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:57,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T11:19:57,637 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43409 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 355ef6e50110,46083,1733656795491 2024-12-08T11:19:57,637 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T11:19:57,640 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43409 {}] master.ServerManager(486): Registering regionserver=355ef6e50110,46083,1733656795491 
2024-12-08T11:19:57,639 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T11:19:57,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:57,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T11:19:57,643 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740 2024-12-08T11:19:57,644 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740 2024-12-08T11:19:57,647 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-08T11:19:57,650 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-08T11:19:57,655 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T11:19:57,657 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65397361, jitterRate=-0.025503382086753845}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:19:57,657 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c 2024-12-08T11:19:57,657 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:36759 2024-12-08T11:19:57,658 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-08T11:19:57,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-08T11:19:57,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-08T11:19:57,661 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-08T11:19:57,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-08T11:19:57,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T11:19:57,661 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T11:19:57,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T11:19:57,663 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-08T11:19:57,663 DEBUG [RS:0;355ef6e50110:46083 {}] zookeeper.ZKUtil(111): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/355ef6e50110,46083,1733656795491 2024-12-08T11:19:57,663 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-08T11:19:57,663 WARN [RS:0;355ef6e50110:46083 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-08T11:19:57,663 INFO [RS:0;355ef6e50110:46083 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T11:19:57,663 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/WALs/355ef6e50110,46083,1733656795491 2024-12-08T11:19:57,665 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [355ef6e50110,46083,1733656795491] 2024-12-08T11:19:57,666 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-08T11:19:57,666 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-08T11:19:57,672 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-08T11:19:57,677 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-08T11:19:57,680 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-08T11:19:57,683 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-08T11:19:57,689 INFO [RS:0;355ef6e50110:46083 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-08T11:19:57,702 INFO [RS:0;355ef6e50110:46083 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-08T11:19:57,705 INFO [RS:0;355ef6e50110:46083 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-08T11:19:57,706 INFO [RS:0;355ef6e50110:46083 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,706 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-08T11:19:57,713 INFO [RS:0;355ef6e50110:46083 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-08T11:19:57,714 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,714 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,714 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,714 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,714 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,715 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/355ef6e50110:0, corePoolSize=2, maxPoolSize=2 2024-12-08T11:19:57,715 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,715 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,715 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,716 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,716 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/355ef6e50110:0, corePoolSize=1, maxPoolSize=1 2024-12-08T11:19:57,716 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/355ef6e50110:0, corePoolSize=3, maxPoolSize=3 2024-12-08T11:19:57,716 DEBUG [RS:0;355ef6e50110:46083 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0, corePoolSize=3, maxPoolSize=3 2024-12-08T11:19:57,717 INFO [RS:0;355ef6e50110:46083 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,717 INFO [RS:0;355ef6e50110:46083 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,717 INFO [RS:0;355ef6e50110:46083 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,718 INFO [RS:0;355ef6e50110:46083 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,718 INFO [RS:0;355ef6e50110:46083 {}] hbase.ChoreService(168): Chore ScheduledChore name=355ef6e50110,46083,1733656795491-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-08T11:19:57,737 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-08T11:19:57,739 INFO [RS:0;355ef6e50110:46083 {}] hbase.ChoreService(168): Chore ScheduledChore name=355ef6e50110,46083,1733656795491-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:57,758 INFO [RS:0;355ef6e50110:46083 {}] regionserver.Replication(204): 355ef6e50110,46083,1733656795491 started 2024-12-08T11:19:57,758 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1767): Serving as 355ef6e50110,46083,1733656795491, RpcServer on 355ef6e50110/172.17.0.2:46083, sessionid=0x100715b0c0a0001 2024-12-08T11:19:57,759 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-08T11:19:57,759 DEBUG [RS:0;355ef6e50110:46083 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 355ef6e50110,46083,1733656795491 2024-12-08T11:19:57,759 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '355ef6e50110,46083,1733656795491' 2024-12-08T11:19:57,759 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-08T11:19:57,760 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-08T11:19:57,761 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-08T11:19:57,761 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-08T11:19:57,761 DEBUG [RS:0;355ef6e50110:46083 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 355ef6e50110,46083,1733656795491 2024-12-08T11:19:57,761 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '355ef6e50110,46083,1733656795491' 2024-12-08T11:19:57,761 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-08T11:19:57,762 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-08T11:19:57,763 DEBUG [RS:0;355ef6e50110:46083 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-08T11:19:57,763 INFO [RS:0;355ef6e50110:46083 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-08T11:19:57,763 INFO [RS:0;355ef6e50110:46083 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-08T11:19:57,834 WARN [355ef6e50110:43409 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-12-08T11:19:57,869 INFO [RS:0;355ef6e50110:46083 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-08T11:19:57,873 INFO [RS:0;355ef6e50110:46083 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=355ef6e50110%2C46083%2C1733656795491, suffix=, logDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/WALs/355ef6e50110,46083,1733656795491, archiveDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/oldWALs, maxLogs=32 2024-12-08T11:19:57,898 DEBUG [RS:0;355ef6e50110:46083 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/WALs/355ef6e50110,46083,1733656795491/355ef6e50110%2C46083%2C1733656795491.1733656797876, exclude list is [], retry=0 2024-12-08T11:19:57,904 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42827,DS-00446ebe-3965-4a0f-bd76-838d1c916da5,DISK] 2024-12-08T11:19:57,907 INFO [RS:0;355ef6e50110:46083 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/WALs/355ef6e50110,46083,1733656795491/355ef6e50110%2C46083%2C1733656795491.1733656797876 2024-12-08T11:19:57,908 DEBUG [RS:0;355ef6e50110:46083 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33571:33571)] 2024-12-08T11:19:58,086 DEBUG [355ef6e50110:43409 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-08T11:19:58,091 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:19:58,096 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 355ef6e50110,46083,1733656795491, state=OPENING 2024-12-08T11:19:58,102 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-08T11:19:58,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:58,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:58,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T11:19:58,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T11:19:58,106 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:19:58,281 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:19:58,283 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=AdminService, sasl=false 2024-12-08T11:19:58,287 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-08T11:19:58,299 INFO [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-08T11:19:58,299 INFO [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-08T11:19:58,300 INFO [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-08T11:19:58,303 INFO [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=355ef6e50110%2C46083%2C1733656795491.meta, suffix=.meta, logDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/WALs/355ef6e50110,46083,1733656795491, archiveDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/oldWALs, maxLogs=32 2024-12-08T11:19:58,320 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/WALs/355ef6e50110,46083,1733656795491/355ef6e50110%2C46083%2C1733656795491.meta.1733656798305.meta, exclude list is [], retry=0 2024-12-08T11:19:58,324 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42827,DS-00446ebe-3965-4a0f-bd76-838d1c916da5,DISK] 2024-12-08T11:19:58,327 INFO [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/WALs/355ef6e50110,46083,1733656795491/355ef6e50110%2C46083%2C1733656795491.meta.1733656798305.meta 2024-12-08T11:19:58,327 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33571:33571)] 2024-12-08T11:19:58,328 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:19:58,329 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-08T11:19:58,390 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-08T11:19:58,395 INFO [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-08T11:19:58,399 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-08T11:19:58,400 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:19:58,400 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-08T11:19:58,400 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-08T11:19:58,403 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-08T11:19:58,405 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-08T11:19:58,405 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:58,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T11:19:58,406 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-08T11:19:58,408 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-08T11:19:58,408 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:58,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T11:19:58,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-08T11:19:58,410 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-08T11:19:58,410 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:58,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-08T11:19:58,412 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740 2024-12-08T11:19:58,415 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740 2024-12-08T11:19:58,418 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-08T11:19:58,420 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-08T11:19:58,422 INFO [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59924535, jitterRate=-0.10705484449863434}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:19:58,423 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-08T11:19:58,431 INFO [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733656798275 2024-12-08T11:19:58,442 DEBUG [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-08T11:19:58,442 INFO [RS_OPEN_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-08T11:19:58,444 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:19:58,445 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 355ef6e50110,46083,1733656795491, state=OPEN 2024-12-08T11:19:58,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T11:19:58,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-08T11:19:58,451 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T11:19:58,451 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-08T11:19:58,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-08T11:19:58,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=355ef6e50110,46083,1733656795491 in 345 msec 2024-12-08T11:19:58,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-08T11:19:58,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 785 msec 2024-12-08T11:19:58,467 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 956 msec 2024-12-08T11:19:58,468 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733656798467, 
completionTime=-1 2024-12-08T11:19:58,468 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-08T11:19:58,468 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-08T11:19:58,507 DEBUG [hconnection-0x15cb16c5-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:19:58,510 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42362, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:19:58,520 INFO [master/355ef6e50110:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-08T11:19:58,520 INFO [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733656858520 2024-12-08T11:19:58,521 INFO [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733656918521 2024-12-08T11:19:58,521 INFO [master/355ef6e50110:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 52 msec 2024-12-08T11:19:58,542 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=355ef6e50110,43409,1733656794727-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:58,543 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=355ef6e50110,43409,1733656794727-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:58,543 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=355ef6e50110,43409,1733656794727-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:58,544 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-355ef6e50110:43409, period=300000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:58,545 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-08T11:19:58,550 DEBUG [master/355ef6e50110:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-08T11:19:58,553 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
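Editor's note: a few entries above, the master finishes waiting for region servers (count=1) and joins the cluster before it bootstraps hbase:namespace below. A minimal client-side check for that same "cluster is up" state might look like the sketch below; it assumes a reachable cluster configuration on the classpath, and simply prints whatever the running cluster reports.

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterUpCheck {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          // Mirrors "Finished waiting on RegionServer count=1" from the master log above.
          System.out.println("live region servers: " + metrics.getLiveServerMetrics().size());
          System.out.println("active master:       " + metrics.getMasterName());
        }
      }
    }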
2024-12-08T11:19:58,554 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-08T11:19:58,560 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-08T11:19:58,563 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T11:19:58,564 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:58,566 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T11:19:58,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741835_1011 (size=358) 2024-12-08T11:19:58,982 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9ba0a86166fe12d211f42156054bda9c, NAME => 'hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c 2024-12-08T11:19:58,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741836_1012 (size=42) 2024-12-08T11:19:59,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:19:59,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 9ba0a86166fe12d211f42156054bda9c, disabling compactions & flushes 2024-12-08T11:19:59,396 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 2024-12-08T11:19:59,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 2024-12-08T11:19:59,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 
after waiting 0 ms 2024-12-08T11:19:59,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 2024-12-08T11:19:59,396 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 2024-12-08T11:19:59,396 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9ba0a86166fe12d211f42156054bda9c: 2024-12-08T11:19:59,399 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T11:19:59,406 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733656799400"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733656799400"}]},"ts":"1733656799400"} 2024-12-08T11:19:59,430 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T11:19:59,432 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T11:19:59,435 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656799432"}]},"ts":"1733656799432"} 2024-12-08T11:19:59,439 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-08T11:19:59,445 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=9ba0a86166fe12d211f42156054bda9c, ASSIGN}] 2024-12-08T11:19:59,448 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=9ba0a86166fe12d211f42156054bda9c, ASSIGN 2024-12-08T11:19:59,449 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=9ba0a86166fe12d211f42156054bda9c, ASSIGN; state=OFFLINE, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=false 2024-12-08T11:19:59,600 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=9ba0a86166fe12d211f42156054bda9c, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:19:59,604 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 9ba0a86166fe12d211f42156054bda9c, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:19:59,758 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:19:59,765 INFO [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 2024-12-08T11:19:59,765 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 9ba0a86166fe12d211f42156054bda9c, NAME => 'hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:19:59,766 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:19:59,766 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:19:59,766 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:19:59,766 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:19:59,769 INFO [StoreOpener-9ba0a86166fe12d211f42156054bda9c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:19:59,772 INFO [StoreOpener-9ba0a86166fe12d211f42156054bda9c-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9ba0a86166fe12d211f42156054bda9c columnFamilyName info 2024-12-08T11:19:59,772 DEBUG [StoreOpener-9ba0a86166fe12d211f42156054bda9c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:19:59,773 INFO [StoreOpener-9ba0a86166fe12d211f42156054bda9c-1 {}] regionserver.HStore(327): Store=9ba0a86166fe12d211f42156054bda9c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:19:59,774 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/namespace/9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:19:59,775 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/namespace/9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:19:59,779 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:19:59,782 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/namespace/9ba0a86166fe12d211f42156054bda9c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T11:19:59,783 INFO [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 9ba0a86166fe12d211f42156054bda9c; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68230587, jitterRate=0.016714975237846375}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-08T11:19:59,785 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 9ba0a86166fe12d211f42156054bda9c: 2024-12-08T11:19:59,787 INFO [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c., pid=6, masterSystemTime=1733656799758 2024-12-08T11:19:59,790 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 2024-12-08T11:19:59,790 INFO [RS_OPEN_PRIORITY_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 
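Editor's note: the "Opened 9ba0a86166fe12d211f42156054bda9c; ... ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68230587, jitterRate=0.016714975...}" entry above shows the split size after jitter is applied. The numbers are consistent with a base region size of 64 MB (67108864 bytes, presumably hbase.hregion.max.filesize in this test configuration) scaled by (1 + jitterRate); the same relation reproduces the meta region's 59924535 with its negative jitterRate further up. A tiny check, under that assumption:

    public class SplitSizeJitterCheck {
      public static void main(String[] args) {
        long baseMaxFileSize = 64L * 1024 * 1024;   // assumed hbase.hregion.max.filesize = 67108864
        double jitterRate = 0.016714975237846375;   // from the log entry above
        long desired = Math.round(baseMaxFileSize * (1.0 + jitterRate));
        System.out.println(desired);                // 68230587, matching the logged desiredMaxFileSize
      }
    }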
2024-12-08T11:19:59,791 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=9ba0a86166fe12d211f42156054bda9c, regionState=OPEN, openSeqNum=2, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:19:59,799 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-08T11:19:59,801 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 9ba0a86166fe12d211f42156054bda9c, server=355ef6e50110,46083,1733656795491 in 191 msec 2024-12-08T11:19:59,803 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-08T11:19:59,803 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=9ba0a86166fe12d211f42156054bda9c, ASSIGN in 354 msec 2024-12-08T11:19:59,805 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T11:19:59,805 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656799805"}]},"ts":"1733656799805"} 2024-12-08T11:19:59,808 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-08T11:19:59,811 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T11:19:59,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2560 sec 2024-12-08T11:19:59,864 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-08T11:19:59,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-08T11:19:59,866 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:59,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:19:59,898 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-08T11:19:59,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-08T11:19:59,918 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 23 msec 2024-12-08T11:19:59,922 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-08T11:19:59,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-08T11:19:59,938 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-12-08T11:19:59,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-08T11:19:59,951 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-08T11:19:59,952 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.387sec 2024-12-08T11:19:59,953 INFO [master/355ef6e50110:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-08T11:19:59,955 INFO [master/355ef6e50110:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-08T11:19:59,956 INFO [master/355ef6e50110:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-08T11:19:59,956 INFO [master/355ef6e50110:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-08T11:19:59,956 INFO [master/355ef6e50110:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-08T11:19:59,957 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=355ef6e50110,43409,1733656794727-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-08T11:19:59,958 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=355ef6e50110,43409,1733656794727-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-08T11:19:59,964 DEBUG [master/355ef6e50110:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-08T11:19:59,965 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-08T11:19:59,966 INFO [master/355ef6e50110:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=355ef6e50110,43409,1733656794727-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
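Editor's note: CreateNamespaceProcedure runs above for the built-in 'default' and 'hbase' namespaces as part of master startup. A user namespace goes through the same procedure when created from a client; a minimal sketch follows, where the namespace name 'acid_test' is made up for illustration.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateNamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Drives a CreateNamespaceProcedure on the master, like the pid=7 / pid=8 entries above.
          admin.createNamespace(NamespaceDescriptor.create("acid_test").build());
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());
          }
        }
      }
    }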
2024-12-08T11:20:00,047 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x38630296 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@76523d14 2024-12-08T11:20:00,048 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-08T11:20:00,055 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75444e35, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:00,059 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-08T11:20:00,059 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-08T11:20:00,069 DEBUG [hconnection-0x25802b45-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:00,081 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42378, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:00,090 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=355ef6e50110,43409,1733656794727 2024-12-08T11:20:00,108 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=242, ProcessCount=11, AvailableMemoryMB=7432 2024-12-08T11:20:00,125 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T11:20:00,129 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53882, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T11:20:00,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
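Editor's note: the TableDescriptorChecker warning above fires because this test run lowers the memstore flush size to 131072 bytes (128 KB) so that flushes happen constantly. As a sketch of where such a value can come from, either the site-configuration key quoted in the warning or the per-table descriptor can carry it; both forms below use the 128 KB value from the log, and the table name is only illustrative. Disabling the sanity check (hbase.table.sanity.checks) is possible but generally unwise outside tests.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallFlushSizeSketch {
      public static void main(String[] args) {
        // Cluster-wide: the key named in the warning message.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);

        // Per-table: MEMSTORE_FLUSHSIZE on the table descriptor.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setMemStoreFlushSize(128 * 1024)
            .build();
        System.out.println(td.getMemStoreFlushSize());
      }
    }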
2024-12-08T11:20:00,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:20:00,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:00,177 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T11:20:00,178 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:00,180 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T11:20:00,181 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-08T11:20:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-08T11:20:00,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741837_1013 (size=963) 2024-12-08T11:20:00,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-08T11:20:00,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-08T11:20:00,614 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c 2024-12-08T11:20:00,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741838_1014 (size=53) 2024-12-08T11:20:00,624 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:20:00,624 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 64a616eb95ce0fb49283d502a9d694a3, disabling compactions & flushes 2024-12-08T11:20:00,624 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:00,624 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:00,624 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. after waiting 0 ms 2024-12-08T11:20:00,624 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:00,624 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:00,625 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:00,627 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T11:20:00,627 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733656800627"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733656800627"}]},"ts":"1733656800627"} 2024-12-08T11:20:00,630 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
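Editor's note: the HMaster$4(2389) entry above records the full descriptor used to create 'TestAcidGuarantees': three column families A, B and C with VERSIONS => '1' and 64 KB blocks, plus the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' that later makes each store use a CompactingMemStore with an ADAPTIVE compactor. A client-side sketch that would produce an equivalent request is shown below; it approximates what the test harness does rather than quoting its actual code, and only sets the attributes that differ from the defaults printed in the log.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestAcidGuarantees"))
              // TABLE_ATTRIBUTES => METADATA seen in the log; switches stores to adaptive in-memory compaction.
              .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)                 // VERSIONS => '1'
                .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
                .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                .build());
          }
          // Submits a CreateTableProcedure (pid=9 in this log) and waits for it to complete.
          admin.createTable(table.build());
        }
      }
    }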
2024-12-08T11:20:00,632 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T11:20:00,633 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656800632"}]},"ts":"1733656800632"} 2024-12-08T11:20:00,635 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T11:20:00,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=64a616eb95ce0fb49283d502a9d694a3, ASSIGN}] 2024-12-08T11:20:00,645 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=64a616eb95ce0fb49283d502a9d694a3, ASSIGN 2024-12-08T11:20:00,646 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=64a616eb95ce0fb49283d502a9d694a3, ASSIGN; state=OFFLINE, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=false 2024-12-08T11:20:00,797 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=64a616eb95ce0fb49283d502a9d694a3, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:00,800 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:20:00,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-08T11:20:00,954 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:00,961 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:00,961 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:20:00,962 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:00,962 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:20:00,962 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:00,962 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:00,965 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:00,968 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:00,969 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 64a616eb95ce0fb49283d502a9d694a3 columnFamilyName A 2024-12-08T11:20:00,969 DEBUG [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:00,970 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] regionserver.HStore(327): Store=64a616eb95ce0fb49283d502a9d694a3/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:00,970 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:00,972 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:00,973 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 64a616eb95ce0fb49283d502a9d694a3 columnFamilyName B 2024-12-08T11:20:00,973 DEBUG [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:00,974 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] regionserver.HStore(327): Store=64a616eb95ce0fb49283d502a9d694a3/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:00,974 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:00,976 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:00,977 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 64a616eb95ce0fb49283d502a9d694a3 columnFamilyName C 2024-12-08T11:20:00,977 DEBUG [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:00,977 INFO [StoreOpener-64a616eb95ce0fb49283d502a9d694a3-1 {}] regionserver.HStore(327): Store=64a616eb95ce0fb49283d502a9d694a3/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:00,978 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:00,980 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:00,980 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:00,984 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T11:20:00,986 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:00,990 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T11:20:00,991 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 64a616eb95ce0fb49283d502a9d694a3; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72300919, jitterRate=0.07736764848232269}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:20:00,993 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:00,995 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., pid=11, masterSystemTime=1733656800954 2024-12-08T11:20:00,998 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:00,998 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
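Editor's note: with the region open, the test drives concurrent writers that put to all three families of the same row in a single mutation; that traffic fills the three CompactingMemStores and triggers the "Flushing ... 3/3 column families, dataSize=53.67 KB" entry further down. A stripped-down sketch of such a writer follows; the row key, qualifier and payload size are invented for illustration. A single-row Put is applied atomically across its families, which is the property the acid-guarantees test checks.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MultiFamilyWriterSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] value = new byte[1024];                  // arbitrary payload
          Put put = new Put(Bytes.toBytes("test_row_0")); // invented row key
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col0"), value);
          }
          table.put(put); // one row, three families, applied atomically
        }
      }
    }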
2024-12-08T11:20:00,999 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=64a616eb95ce0fb49283d502a9d694a3, regionState=OPEN, openSeqNum=2, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,005 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-08T11:20:01,006 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 in 202 msec 2024-12-08T11:20:01,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-08T11:20:01,009 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=64a616eb95ce0fb49283d502a9d694a3, ASSIGN in 363 msec 2024-12-08T11:20:01,010 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T11:20:01,010 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656801010"}]},"ts":"1733656801010"} 2024-12-08T11:20:01,013 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T11:20:01,016 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T11:20:01,019 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 846 msec 2024-12-08T11:20:01,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-08T11:20:01,308 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-08T11:20:01,314 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e67f019 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6fcb5f29 2024-12-08T11:20:01,318 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fdf5682, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,320 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,322 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42390, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:01,325 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T11:20:01,327 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53894, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T11:20:01,335 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5095ba91 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f2091cc 2024-12-08T11:20:01,339 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79d38d10, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,340 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-12-08T11:20:01,346 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72b32f98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,347 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x62c43377 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18cb251d 2024-12-08T11:20:01,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,351 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-12-08T11:20:01,355 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bbb5d8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,356 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a8f4734 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@e52b42a 2024-12-08T11:20:01,360 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,362 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10c964e8 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9ed28bb 2024-12-08T11:20:01,365 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b5cad1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,367 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-12-08T11:20:01,373 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,374 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x527c6d40 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@353bc462 2024-12-08T11:20:01,377 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@767a8485, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,378 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-12-08T11:20:01,381 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:01,386 DEBUG [hconnection-0x6776d1b9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,387 DEBUG [hconnection-0x66d5d030-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,387 DEBUG [hconnection-0x4f76d792-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,389 DEBUG [hconnection-0x1bfd3810-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,390 DEBUG [hconnection-0x6919a470-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,393 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42396, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:01,393 DEBUG [hconnection-0x1107dd33-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:01,395 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42398, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-08T11:20:01,395 DEBUG [hconnection-0x4fd12293-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,395 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42400, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:01,397 DEBUG [hconnection-0x426356f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,398 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42422, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:01,398 DEBUG [hconnection-0x3b20eaa4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:01,399 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42436, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:01,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-08T11:20:01,403 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42438, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:01,404 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:01,405 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42462, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:01,406 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:01,409 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:01,412 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:01,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T11:20:01,419 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42464, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:01,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:01,484 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:20:01,498 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:01,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:01,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:01,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:01,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:01,500 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:01,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T11:20:01,580 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T11:20:01,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:01,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:01,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:01,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:01,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:01,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:01,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656861634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/50a6a24f32544d12bb7fe4e53f6b4eaf is 50, key is test_row_0/A:col10/1733656801456/Put/seqid=0 2024-12-08T11:20:01,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656861643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656861654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656861654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656861679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741839_1015 (size=16681) 2024-12-08T11:20:01,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/50a6a24f32544d12bb7fe4e53f6b4eaf 2024-12-08T11:20:01,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T11:20:01,787 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T11:20:01,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656861800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:01,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:01,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:01,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:01,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:01,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:01,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656861803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,813 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:01,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656861803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/099ac04d3d7d429bb2d70c99b843ca5c is 50, key is test_row_0/B:col10/1733656801456/Put/seqid=0 2024-12-08T11:20:01,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656861803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656861803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741840_1016 (size=12001) 2024-12-08T11:20:01,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/099ac04d3d7d429bb2d70c99b843ca5c 2024-12-08T11:20:01,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/34f1408c07224d0f990c8641fbb5c94d is 50, key is test_row_0/C:col10/1733656801456/Put/seqid=0 2024-12-08T11:20:01,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741841_1017 (size=12001) 2024-12-08T11:20:01,946 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/34f1408c07224d0f990c8641fbb5c94d 
2024-12-08T11:20:01,960 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/50a6a24f32544d12bb7fe4e53f6b4eaf as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/50a6a24f32544d12bb7fe4e53f6b4eaf 2024-12-08T11:20:01,969 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:01,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T11:20:01,974 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/50a6a24f32544d12bb7fe4e53f6b4eaf, entries=250, sequenceid=13, filesize=16.3 K 2024-12-08T11:20:01,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:01,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:01,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:01,977 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:01,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:01,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:01,980 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-08T11:20:01,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/099ac04d3d7d429bb2d70c99b843ca5c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/099ac04d3d7d429bb2d70c99b843ca5c 2024-12-08T11:20:01,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/099ac04d3d7d429bb2d70c99b843ca5c, entries=150, sequenceid=13, filesize=11.7 K 2024-12-08T11:20:01,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/34f1408c07224d0f990c8641fbb5c94d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/34f1408c07224d0f990c8641fbb5c94d 2024-12-08T11:20:02,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/34f1408c07224d0f990c8641fbb5c94d, entries=150, sequenceid=13, filesize=11.7 K 2024-12-08T11:20:02,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 64a616eb95ce0fb49283d502a9d694a3 in 538ms, sequenceid=13, compaction requested=false 2024-12-08T11:20:02,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T11:20:02,024 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-08T11:20:02,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:02,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:02,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T11:20:02,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:02,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:02,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:02,042 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:02,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:02,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:02,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/293eff860ed2470398355204ab198652 is 50, key is test_row_0/A:col10/1733656802038/Put/seqid=0 2024-12-08T11:20:02,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656862058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656862070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656862075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656862076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656862077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741842_1018 (size=14341) 2024-12-08T11:20:02,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/293eff860ed2470398355204ab198652 2024-12-08T11:20:02,133 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T11:20:02,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:02,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:02,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:02,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/d8cc6aae67124cdfa7070dc54112adea is 50, key is test_row_0/B:col10/1733656802038/Put/seqid=0 2024-12-08T11:20:02,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741843_1019 (size=12001) 2024-12-08T11:20:02,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656862181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,192 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/d8cc6aae67124cdfa7070dc54112adea 2024-12-08T11:20:02,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656862188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656862190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656862191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656862191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/9c943a7ea018451abdebd231b3d42351 is 50, key is test_row_0/C:col10/1733656802038/Put/seqid=0 2024-12-08T11:20:02,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741844_1020 (size=12001) 2024-12-08T11:20:02,247 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/9c943a7ea018451abdebd231b3d42351 2024-12-08T11:20:02,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/293eff860ed2470398355204ab198652 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/293eff860ed2470398355204ab198652 2024-12-08T11:20:02,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/293eff860ed2470398355204ab198652, entries=200, sequenceid=39, filesize=14.0 K 2024-12-08T11:20:02,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/d8cc6aae67124cdfa7070dc54112adea as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/d8cc6aae67124cdfa7070dc54112adea 2024-12-08T11:20:02,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/d8cc6aae67124cdfa7070dc54112adea, entries=150, sequenceid=39, filesize=11.7 K 2024-12-08T11:20:02,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/9c943a7ea018451abdebd231b3d42351 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9c943a7ea018451abdebd231b3d42351 2024-12-08T11:20:02,288 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T11:20:02,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:02,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:02,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:02,289 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:02,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9c943a7ea018451abdebd231b3d42351, entries=150, sequenceid=39, filesize=11.7 K 2024-12-08T11:20:02,305 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 64a616eb95ce0fb49283d502a9d694a3 in 263ms, sequenceid=39, compaction requested=false 2024-12-08T11:20:02,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:02,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:02,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T11:20:02,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:02,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:02,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:02,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:02,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:02,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:02,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7769d199242848e0a7fcf39c20bc7a7e is 50, key is test_row_1/A:col10/1733656802413/Put/seqid=0 2024-12-08T11:20:02,443 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T11:20:02,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:02,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:02,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:02,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741845_1021 (size=9657) 2024-12-08T11:20:02,489 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7769d199242848e0a7fcf39c20bc7a7e 2024-12-08T11:20:02,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656862492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656862498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656862500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/80e8427d291b400b893d3e0be2651874 is 50, key is test_row_1/B:col10/1733656802413/Put/seqid=0 2024-12-08T11:20:02,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656862502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656862507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T11:20:02,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741846_1022 (size=9657) 2024-12-08T11:20:02,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/80e8427d291b400b893d3e0be2651874 2024-12-08T11:20:02,559 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/ce394f6df090471c93b8143dccd9b9dc is 50, key is test_row_1/C:col10/1733656802413/Put/seqid=0 2024-12-08T11:20:02,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741847_1023 (size=9657) 2024-12-08T11:20:02,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/ce394f6df090471c93b8143dccd9b9dc 2024-12-08T11:20:02,600 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T11:20:02,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:02,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
as already flushing 2024-12-08T11:20:02,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:02,602 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:02,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656862612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7769d199242848e0a7fcf39c20bc7a7e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7769d199242848e0a7fcf39c20bc7a7e 2024-12-08T11:20:02,615 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656862614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656862618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656862618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656862619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,631 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7769d199242848e0a7fcf39c20bc7a7e, entries=100, sequenceid=53, filesize=9.4 K 2024-12-08T11:20:02,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/80e8427d291b400b893d3e0be2651874 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/80e8427d291b400b893d3e0be2651874 2024-12-08T11:20:02,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/80e8427d291b400b893d3e0be2651874, entries=100, sequenceid=53, filesize=9.4 K 2024-12-08T11:20:02,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/ce394f6df090471c93b8143dccd9b9dc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ce394f6df090471c93b8143dccd9b9dc 2024-12-08T11:20:02,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ce394f6df090471c93b8143dccd9b9dc, entries=100, sequenceid=53, filesize=9.4 K 2024-12-08T11:20:02,668 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 64a616eb95ce0fb49283d502a9d694a3 in 252ms, sequenceid=53, compaction requested=true 2024-12-08T11:20:02,668 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:02,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:02,679 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:02,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:02,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:02,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:02,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:02,680 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:02,680 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:02,684 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:02,686 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:02,686 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:02,686 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/099ac04d3d7d429bb2d70c99b843ca5c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/d8cc6aae67124cdfa7070dc54112adea, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/80e8427d291b400b893d3e0be2651874] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=32.9 K 2024-12-08T11:20:02,688 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 099ac04d3d7d429bb2d70c99b843ca5c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733656801456 2024-12-08T11:20:02,689 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40679 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:02,689 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:02,689 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting d8cc6aae67124cdfa7070dc54112adea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733656801609 2024-12-08T11:20:02,689 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:02,690 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/50a6a24f32544d12bb7fe4e53f6b4eaf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/293eff860ed2470398355204ab198652, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7769d199242848e0a7fcf39c20bc7a7e] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=39.7 K 2024-12-08T11:20:02,696 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50a6a24f32544d12bb7fe4e53f6b4eaf, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733656801456 2024-12-08T11:20:02,696 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 80e8427d291b400b893d3e0be2651874, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656802413 2024-12-08T11:20:02,698 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 293eff860ed2470398355204ab198652, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733656801609 2024-12-08T11:20:02,699 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7769d199242848e0a7fcf39c20bc7a7e, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656802413 2024-12-08T11:20:02,731 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#9 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:02,732 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#10 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:02,733 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/13860169c0734ce2a4fe3b7bfdc2ffbb is 50, key is test_row_0/A:col10/1733656802038/Put/seqid=0 2024-12-08T11:20:02,733 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/f374c8961859440fbeee7e5996b8db06 is 50, key is test_row_0/B:col10/1733656802038/Put/seqid=0 2024-12-08T11:20:02,757 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-08T11:20:02,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:02,758 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T11:20:02,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:02,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:02,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:02,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:02,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:02,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:02,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741848_1024 (size=12104) 2024-12-08T11:20:02,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741849_1025 (size=12104) 2024-12-08T11:20:02,784 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/13860169c0734ce2a4fe3b7bfdc2ffbb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/13860169c0734ce2a4fe3b7bfdc2ffbb 2024-12-08T11:20:02,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7fc72ebad50c417fade96ad1f982be65 is 50, key is test_row_0/A:col10/1733656802497/Put/seqid=0 2024-12-08T11:20:02,786 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/f374c8961859440fbeee7e5996b8db06 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f374c8961859440fbeee7e5996b8db06 2024-12-08T11:20:02,806 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into f374c8961859440fbeee7e5996b8db06(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:02,807 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:02,807 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=13, startTime=1733656802679; duration=0sec 2024-12-08T11:20:02,807 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into 13860169c0734ce2a4fe3b7bfdc2ffbb(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:02,807 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:02,807 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=13, startTime=1733656802670; duration=0sec 2024-12-08T11:20:02,807 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:02,807 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:02,807 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:02,807 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:02,808 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:02,811 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:02,812 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:02,812 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:02,812 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/34f1408c07224d0f990c8641fbb5c94d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9c943a7ea018451abdebd231b3d42351, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ce394f6df090471c93b8143dccd9b9dc] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=32.9 K 2024-12-08T11:20:02,813 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 34f1408c07224d0f990c8641fbb5c94d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1733656801456 2024-12-08T11:20:02,814 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c943a7ea018451abdebd231b3d42351, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733656801609 2024-12-08T11:20:02,815 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ce394f6df090471c93b8143dccd9b9dc, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656802413 2024-12-08T11:20:02,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741850_1026 (size=12001) 2024-12-08T11:20:02,819 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7fc72ebad50c417fade96ad1f982be65 2024-12-08T11:20:02,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:02,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:02,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656862848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656862850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656862851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656862854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656862855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,868 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#12 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:02,869 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/27810acbe6174030b6259bb75cc2523e is 50, key is test_row_0/C:col10/1733656802038/Put/seqid=0 2024-12-08T11:20:02,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/b90485eb48174693b2ea162d84902077 is 50, key is test_row_0/B:col10/1733656802497/Put/seqid=0 2024-12-08T11:20:02,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741851_1027 (size=12104) 2024-12-08T11:20:02,909 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/27810acbe6174030b6259bb75cc2523e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/27810acbe6174030b6259bb75cc2523e 2024-12-08T11:20:02,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741852_1028 (size=12001) 2024-12-08T11:20:02,924 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into 27810acbe6174030b6259bb75cc2523e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:02,924 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:02,924 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656802679; duration=0sec 2024-12-08T11:20:02,924 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:02,924 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:02,965 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656862959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656862962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,968 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656862962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,969 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656862964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:02,969 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:02,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656862965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656863173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656863174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656863175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656863170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656863177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,320 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/b90485eb48174693b2ea162d84902077 2024-12-08T11:20:03,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/c79db04aca494941b4a086f366ea8ea2 is 50, key is test_row_0/C:col10/1733656802497/Put/seqid=0 2024-12-08T11:20:03,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741853_1029 (size=12001) 2024-12-08T11:20:03,366 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/c79db04aca494941b4a086f366ea8ea2 2024-12-08T11:20:03,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7fc72ebad50c417fade96ad1f982be65 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fc72ebad50c417fade96ad1f982be65 2024-12-08T11:20:03,428 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fc72ebad50c417fade96ad1f982be65, entries=150, sequenceid=75, filesize=11.7 K 2024-12-08T11:20:03,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/b90485eb48174693b2ea162d84902077 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b90485eb48174693b2ea162d84902077 2024-12-08T11:20:03,446 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b90485eb48174693b2ea162d84902077, entries=150, sequenceid=75, filesize=11.7 K 2024-12-08T11:20:03,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/c79db04aca494941b4a086f366ea8ea2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c79db04aca494941b4a086f366ea8ea2 2024-12-08T11:20:03,469 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c79db04aca494941b4a086f366ea8ea2, entries=150, sequenceid=75, filesize=11.7 K 2024-12-08T11:20:03,479 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 64a616eb95ce0fb49283d502a9d694a3 in 721ms, sequenceid=75, compaction requested=false 2024-12-08T11:20:03,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:03,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:03,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-08T11:20:03,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-08T11:20:03,487 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-08T11:20:03,487 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0710 sec 2024-12-08T11:20:03,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T11:20:03,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:03,492 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 2.0920 sec 2024-12-08T11:20:03,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:03,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:03,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:03,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:03,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:03,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:03,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/a925913452d74124bcb0244a96640f3e is 50, key is test_row_0/A:col10/1733656803487/Put/seqid=0 2024-12-08T11:20:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-08T11:20:03,528 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-08T11:20:03,531 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:03,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-08T11:20:03,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T11:20:03,535 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:03,541 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:03,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:03,545 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656863535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741854_1030 (size=14341) 2024-12-08T11:20:03,545 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656863536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656863543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/a925913452d74124bcb0244a96640f3e 2024-12-08T11:20:03,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656863545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,555 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656863546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/930aa1e4e2194cb18648930248847067 is 50, key is test_row_0/B:col10/1733656803487/Put/seqid=0 2024-12-08T11:20:03,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741855_1031 (size=12001) 2024-12-08T11:20:03,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T11:20:03,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656863648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,658 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656863656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656863657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656863659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656863659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,698 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,698 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-08T11:20:03,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:03,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:03,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:03,699 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:03,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:03,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:03,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T11:20:03,854 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-08T11:20:03,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:03,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:03,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:03,855 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:03,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:03,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:03,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656863853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656863862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656863863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656863865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:03,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656863866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:03,912 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T11:20:04,009 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-08T11:20:04,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:04,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:04,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:04,010 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:04,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:04,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:04,016 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-08T11:20:04,018 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-08T11:20:04,020 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/930aa1e4e2194cb18648930248847067 2024-12-08T11:20:04,039 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/fb70e39fb9dc4b2c9abf5ceaae7f8c9a is 50, key is test_row_0/C:col10/1733656803487/Put/seqid=0 2024-12-08T11:20:04,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741856_1032 (size=12001) 2024-12-08T11:20:04,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/fb70e39fb9dc4b2c9abf5ceaae7f8c9a 2024-12-08T11:20:04,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/a925913452d74124bcb0244a96640f3e as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a925913452d74124bcb0244a96640f3e 2024-12-08T11:20:04,078 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a925913452d74124bcb0244a96640f3e, entries=200, sequenceid=95, filesize=14.0 K 2024-12-08T11:20:04,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/930aa1e4e2194cb18648930248847067 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/930aa1e4e2194cb18648930248847067 2024-12-08T11:20:04,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/930aa1e4e2194cb18648930248847067, entries=150, sequenceid=95, filesize=11.7 K 2024-12-08T11:20:04,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/fb70e39fb9dc4b2c9abf5ceaae7f8c9a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fb70e39fb9dc4b2c9abf5ceaae7f8c9a 2024-12-08T11:20:04,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fb70e39fb9dc4b2c9abf5ceaae7f8c9a, entries=150, sequenceid=95, filesize=11.7 K 2024-12-08T11:20:04,108 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 64a616eb95ce0fb49283d502a9d694a3 in 618ms, sequenceid=95, compaction requested=true 2024-12-08T11:20:04,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:04,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:04,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:04,109 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:04,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:04,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:04,109 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:04,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:04,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:04,111 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:04,111 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:04,111 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:04,112 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f374c8961859440fbeee7e5996b8db06, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b90485eb48174693b2ea162d84902077, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/930aa1e4e2194cb18648930248847067] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=35.3 K 2024-12-08T11:20:04,113 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:04,113 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:04,113 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
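The selection entries above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", then ExploringCompactionPolicy picking all three files) line up with the stock store-compaction settings. A minimal sketch of the configuration keys behind those numbers follows; the values shown are the HBase defaults, and whether this test run overrides them is not visible in the log, so treat the class and its values as illustrative only.

```java
// Illustrative sketch: the configuration keys behind "3 eligible, 16 blocking"
// in the compaction-selection entries above. Values are the HBase defaults;
// any overrides made by this test are an assumption, not shown in the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static Configuration withDefaults() {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction
        // is selected (default 3, matching "3 eligible" above).
        conf.setInt("hbase.hstore.compactionThreshold", 3);
        // Store-file count at which updates to the region are blocked until
        // compaction catches up (default 16, matching "16 blocking" above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        return conf;
    }
}
```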
2024-12-08T11:20:04,113 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/13860169c0734ce2a4fe3b7bfdc2ffbb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fc72ebad50c417fade96ad1f982be65, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a925913452d74124bcb0244a96640f3e] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=37.5 K 2024-12-08T11:20:04,114 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f374c8961859440fbeee7e5996b8db06, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656802038 2024-12-08T11:20:04,115 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b90485eb48174693b2ea162d84902077, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733656802493 2024-12-08T11:20:04,115 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13860169c0734ce2a4fe3b7bfdc2ffbb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656802038 2024-12-08T11:20:04,116 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 930aa1e4e2194cb18648930248847067, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733656803487 2024-12-08T11:20:04,117 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fc72ebad50c417fade96ad1f982be65, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733656802493 2024-12-08T11:20:04,120 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting a925913452d74124bcb0244a96640f3e, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733656802848 2024-12-08T11:20:04,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T11:20:04,158 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:04,160 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/bedb38e2df7946a68acc7a18ef3145fe is 50, key is test_row_0/A:col10/1733656803487/Put/seqid=0 2024-12-08T11:20:04,164 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-08T11:20:04,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:04,165 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:20:04,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:04,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:04,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:04,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:04,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:04,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:04,168 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:04,169 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/bf8a1971bfce4a7ea8c58d8d11e346a7 is 50, key is test_row_0/B:col10/1733656803487/Put/seqid=0 2024-12-08T11:20:04,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:04,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:04,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/efac7e74ad4f4680894518ce2c5d60db is 50, key is test_row_0/A:col10/1733656803541/Put/seqid=0 2024-12-08T11:20:04,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741857_1033 (size=12207) 2024-12-08T11:20:04,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741858_1034 (size=12207) 2024-12-08T11:20:04,210 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/bf8a1971bfce4a7ea8c58d8d11e346a7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bf8a1971bfce4a7ea8c58d8d11e346a7 2024-12-08T11:20:04,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741859_1035 (size=12001) 2024-12-08T11:20:04,217 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/efac7e74ad4f4680894518ce2c5d60db 2024-12-08T11:20:04,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/7f641b9e61d94a95aad4e03db73bbb34 is 50, key is test_row_0/B:col10/1733656803541/Put/seqid=0 2024-12-08T11:20:04,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741860_1036 (size=12001) 2024-12-08T11:20:04,258 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into bf8a1971bfce4a7ea8c58d8d11e346a7(size=11.9 K), total size for 
store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:04,258 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:04,258 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=13, startTime=1733656804109; duration=0sec 2024-12-08T11:20:04,259 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:04,259 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:04,259 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:04,262 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:04,262 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:04,262 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
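The pid=15 entries above show the master dispatching a FlushRegionCallable to the region server, i.e. an externally requested flush rather than a purely size-triggered one. Below is a minimal client-side sketch, under the assumption that such a flush is driven through the standard Admin API; only the table name is taken from the log, everything else is illustrative.

```java
// Minimal sketch (assumption: a flush like procedure pid=15 above is requested
// through the standard Admin API; only the table name comes from the log).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to schedule a flush; on the region server this
            // appears as a remote FlushRegionCallable procedure.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```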
2024-12-08T11:20:04,262 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/27810acbe6174030b6259bb75cc2523e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c79db04aca494941b4a086f366ea8ea2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fb70e39fb9dc4b2c9abf5ceaae7f8c9a] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=35.3 K 2024-12-08T11:20:04,263 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 27810acbe6174030b6259bb75cc2523e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656802038 2024-12-08T11:20:04,264 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c79db04aca494941b4a086f366ea8ea2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733656802493 2024-12-08T11:20:04,265 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting fb70e39fb9dc4b2c9abf5ceaae7f8c9a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733656803487 2024-12-08T11:20:04,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656864276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656864276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656864282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656864284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656864284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,302 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#22 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:04,303 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/e43786be13504d1991302eb944caba07 is 50, key is test_row_0/C:col10/1733656803487/Put/seqid=0 2024-12-08T11:20:04,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741861_1037 (size=12207) 2024-12-08T11:20:04,343 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/e43786be13504d1991302eb944caba07 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/e43786be13504d1991302eb944caba07 2024-12-08T11:20:04,365 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into e43786be13504d1991302eb944caba07(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
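The repeated RegionTooBusyException warnings ("Over memstore limit=512.0 K") are the server pushing back on writers while the in-flight flush drains the memstore; callers see them as IOExceptions on their mutations. The sketch below is a hypothetical writer, using only the row/family/qualifier layout visible in the log (test_row_0, families A/B/C, column col10); the explicit backoff loop is an assumption for illustration, since in practice the HBase client also retries such failures internally according to its own retry settings.

```java
// Illustrative writer sketch: row/family/qualifier follow the log; the backoff
// loop is an assumption, not the test's actual client code.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (IOException e) {
                    // A RegionTooBusyException like the warnings above surfaces here
                    // once the client's own retries are exhausted; back off so the
                    // pending flush can bring the memstore back under its limit.
                    Thread.sleep(100L * (attempt + 1));
                }
            }
        }
    }
}
```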
2024-12-08T11:20:04,366 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:04,366 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656804109; duration=0sec 2024-12-08T11:20:04,366 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:04,366 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:04,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656864387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656864388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656864390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656864390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656864391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656864594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656864595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,599 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656864597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656864597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656864597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,606 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/bedb38e2df7946a68acc7a18ef3145fe as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bedb38e2df7946a68acc7a18ef3145fe 2024-12-08T11:20:04,618 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into bedb38e2df7946a68acc7a18ef3145fe(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:04,619 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:04,619 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=13, startTime=1733656804109; duration=0sec 2024-12-08T11:20:04,619 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:04,619 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:04,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T11:20:04,644 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/7f641b9e61d94a95aad4e03db73bbb34 2024-12-08T11:20:04,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/8bcbd0fa20d34245ac90026d9829d8ad is 50, key is test_row_0/C:col10/1733656803541/Put/seqid=0 2024-12-08T11:20:04,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741862_1038 (size=12001) 2024-12-08T11:20:04,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656864900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656864902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656864902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656864906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:04,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:04,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656864906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,108 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/8bcbd0fa20d34245ac90026d9829d8ad 2024-12-08T11:20:05,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/efac7e74ad4f4680894518ce2c5d60db as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/efac7e74ad4f4680894518ce2c5d60db 2024-12-08T11:20:05,132 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/efac7e74ad4f4680894518ce2c5d60db, entries=150, sequenceid=115, filesize=11.7 K 2024-12-08T11:20:05,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/7f641b9e61d94a95aad4e03db73bbb34 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7f641b9e61d94a95aad4e03db73bbb34 2024-12-08T11:20:05,147 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7f641b9e61d94a95aad4e03db73bbb34, entries=150, sequenceid=115, filesize=11.7 K 2024-12-08T11:20:05,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/8bcbd0fa20d34245ac90026d9829d8ad as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/8bcbd0fa20d34245ac90026d9829d8ad 2024-12-08T11:20:05,162 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/8bcbd0fa20d34245ac90026d9829d8ad, entries=150, sequenceid=115, filesize=11.7 K 2024-12-08T11:20:05,164 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 64a616eb95ce0fb49283d502a9d694a3 in 999ms, sequenceid=115, compaction requested=false 2024-12-08T11:20:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
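The Mutate calls rejected above with RegionTooBusyException are writes arriving while the region's memstore is over its 512.0 K blocking limit; the usual client behavior is to back off and retry until the flush drains the memstore. Below is a minimal, hypothetical sketch of such a writer against this table using the public HBase client API (the table name, row key, family "A" and qualifier "col10" are taken from the log; the value, retry count and backoff are assumptions, and the standard client normally retries this kind of exception internally anyway).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Family "A" and qualifier "col10" match the keys seen in the flush output above.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);   // may be rejected while the region is over its memstore limit
                    break;
                } catch (RegionTooBusyException e) {
                    // Server refused the write; wait before retrying so the flush can drain the memstore.
                    Thread.sleep(200L * attempt);
                }
            }
        }
    }
}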
2024-12-08T11:20:05,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-08T11:20:05,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-08T11:20:05,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-08T11:20:05,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6260 sec 2024-12-08T11:20:05,176 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.6420 sec 2024-12-08T11:20:05,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-08T11:20:05,237 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-08T11:20:05,239 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-08T11:20:05,239 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-08T11:20:05,241 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T11:20:05,241 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-08T11:20:05,242 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-08T11:20:05,260 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-08T11:20:05,262 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-08T11:20:05,262 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-08T11:20:05,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:05,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T11:20:05,418 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:05,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:05,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:05,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:05,419 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:05,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:05,440 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/47f217a6f12b497d8b493a491e3f50aa is 50, key is test_row_0/A:col10/1733656804276/Put/seqid=0 2024-12-08T11:20:05,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656865451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656865452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656865455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,461 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656865457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656865458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741863_1039 (size=16931) 2024-12-08T11:20:05,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/47f217a6f12b497d8b493a491e3f50aa 2024-12-08T11:20:05,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/fd2e13716169465dbff2182404c9e864 is 50, key is test_row_0/B:col10/1733656804276/Put/seqid=0 2024-12-08T11:20:05,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741864_1040 (size=12151) 2024-12-08T11:20:05,562 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/fd2e13716169465dbff2182404c9e864 2024-12-08T11:20:05,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656865562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,569 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656865563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656865564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656865564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656865563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,591 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/201fd910304e4cf18211e19120174e59 is 50, key is test_row_0/C:col10/1733656804276/Put/seqid=0 2024-12-08T11:20:05,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741865_1041 (size=12151) 2024-12-08T11:20:05,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/201fd910304e4cf18211e19120174e59 2024-12-08T11:20:05,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-08T11:20:05,645 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-08T11:20:05,647 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-08T11:20:05,650 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:05,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-08T11:20:05,651 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:05,653 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:05,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/47f217a6f12b497d8b493a491e3f50aa as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/47f217a6f12b497d8b493a491e3f50aa 2024-12-08T11:20:05,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/47f217a6f12b497d8b493a491e3f50aa, entries=250, sequenceid=137, filesize=16.5 K 2024-12-08T11:20:05,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/fd2e13716169465dbff2182404c9e864 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/fd2e13716169465dbff2182404c9e864 2024-12-08T11:20:05,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/fd2e13716169465dbff2182404c9e864, entries=150, sequenceid=137, filesize=11.9 K 2024-12-08T11:20:05,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/201fd910304e4cf18211e19120174e59 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/201fd910304e4cf18211e19120174e59 2024-12-08T11:20:05,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/201fd910304e4cf18211e19120174e59, entries=150, sequenceid=137, filesize=11.9 K 2024-12-08T11:20:05,696 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 64a616eb95ce0fb49283d502a9d694a3 in 280ms, sequenceid=137, compaction requested=true 2024-12-08T11:20:05,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): 
Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:05,696 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:05,698 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41139 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:05,698 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:05,698 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:05,698 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bedb38e2df7946a68acc7a18ef3145fe, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/efac7e74ad4f4680894518ce2c5d60db, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/47f217a6f12b497d8b493a491e3f50aa] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=40.2 K 2024-12-08T11:20:05,699 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting bedb38e2df7946a68acc7a18ef3145fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733656803487 2024-12-08T11:20:05,700 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting efac7e74ad4f4680894518ce2c5d60db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733656803510 2024-12-08T11:20:05,700 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47f217a6f12b497d8b493a491e3f50aa, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733656804276 2024-12-08T11:20:05,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:05,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:05,703 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:05,705 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:05,706 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 
{}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:05,706 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:05,706 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bf8a1971bfce4a7ea8c58d8d11e346a7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7f641b9e61d94a95aad4e03db73bbb34, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/fd2e13716169465dbff2182404c9e864] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=35.5 K 2024-12-08T11:20:05,706 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting bf8a1971bfce4a7ea8c58d8d11e346a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733656803487 2024-12-08T11:20:05,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:05,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:05,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:05,707 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:05,708 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f641b9e61d94a95aad4e03db73bbb34, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733656803510 2024-12-08T11:20:05,708 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting fd2e13716169465dbff2182404c9e864, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733656804276 2024-12-08T11:20:05,730 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#27 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:05,731 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/6dd5ae5fc8554ea68ce824d77c7f1f11 is 50, key is test_row_0/A:col10/1733656804276/Put/seqid=0 2024-12-08T11:20:05,739 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:05,739 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/3d124f7e89f14e4f9f32ead41ec7f4d2 is 50, key is test_row_0/B:col10/1733656804276/Put/seqid=0 2024-12-08T11:20:05,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-08T11:20:05,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:05,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T11:20:05,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:05,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:05,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:05,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:05,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:05,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:05,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741866_1042 (size=12459) 2024-12-08T11:20:05,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741867_1043 (size=12459) 2024-12-08T11:20:05,806 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-08T11:20:05,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
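The ExploringCompactionPolicy entries above report "selected 3 files of size 41139 ... with 1 in ratio". The sketch below is a simplified, stand-alone illustration of the kind of size-ratio check such selection policies apply (each candidate file should be no larger than the combined size of the other candidates times a ratio); it is not HBase's ExploringCompactionPolicy itself, the ratio value 1.2 is assumed, and the file sizes are only rounded from the 11.9 K, 11.7 K and 16.5 K A-store files named in the log.

import java.util.List;

/** Simplified illustration of a size-ratio check for minor-compaction candidates. */
public class CompactionRatioSketch {

    /** True if every file is at most `ratio` times the combined size of the other files. */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;   // one file dominates; compacting it again would mostly rewrite it
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the three selected A-store files from the log: ~11.9 K, ~11.7 K and ~16.5 K.
        List<Long> candidate = List.of(12186L, 11980L, 16931L);
        System.out.println(filesInRatio(candidate, 1.2));   // prints true: the selection is "in ratio"
    }
}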
2024-12-08T11:20:05,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:05,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:05,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:05,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
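The remote flush (pid=17) above fails with "Unable to complete flush ... as already flushing" because the region is still running the flush that MemStoreFlusher started moments earlier. The following is a generic sketch of that guard pattern, assuming a single in-progress flag; it is an illustration of the behavior seen in the log, not the HRegion implementation.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

/** Generic "refuse if already flushing" guard; concurrent requests fail fast instead of flushing twice. */
public class FlushGuardSketch {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    void flushIfIdle(Runnable doFlush) throws IOException {
        if (!flushing.compareAndSet(false, true)) {
            // Another flush is in progress; report failure so the coordinator can handle it later.
            throw new IOException("Unable to complete flush: already flushing");
        }
        try {
            doFlush.run();
        } finally {
            flushing.set(false);
        }
    }
}

In the log, that failure is reported back to the master as a failed remote procedure, which is what the next entries show.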
2024-12-08T11:20:05,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
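The FlushTableProcedure (pid=16) above was started by a client request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"). A minimal sketch of issuing that request through the public Admin API follows; the table name comes from the log, while the configuration and surrounding setup are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush the table; in the log this shows up as a
            // FlushTableProcedure with a FlushRegionProcedure subprocedure per region.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}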
2024-12-08T11:20:05,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/6d23ca4781a14b71ad7d137a9d73ee54 is 50, key is test_row_0/A:col10/1733656805455/Put/seqid=0 2024-12-08T11:20:05,818 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/6dd5ae5fc8554ea68ce824d77c7f1f11 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6dd5ae5fc8554ea68ce824d77c7f1f11 2024-12-08T11:20:05,818 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/3d124f7e89f14e4f9f32ead41ec7f4d2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3d124f7e89f14e4f9f32ead41ec7f4d2 2024-12-08T11:20:05,830 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into 6dd5ae5fc8554ea68ce824d77c7f1f11(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:05,830 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:05,830 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=13, startTime=1733656805696; duration=0sec 2024-12-08T11:20:05,831 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:05,831 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:05,831 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:05,831 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into 3d124f7e89f14e4f9f32ead41ec7f4d2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
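Both the flushes and the compaction above write their output under the region's .tmp directory and only then commit it into the store directory (the "Committing ... .tmp/A/... as .../A/..." entries), so readers never observe a partially written HFile. Below is a minimal sketch of that write-then-rename pattern on HDFS with hypothetical paths and placeholder contents; it illustrates the idea only, not HRegionFileSystem itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Hypothetical layout mirroring the log: write under .tmp, then move into the store directory.
        Path tmpFile   = new Path("/hbase/data/default/ExampleTable/region/.tmp/A/newfile");
        Path storeFile = new Path("/hbase/data/default/ExampleTable/region/A/newfile");

        try (FSDataOutputStream out = fs.create(tmpFile)) {
            out.writeBytes("hfile contents would be written here");   // stand-in for the real HFile writer
        }
        // The commit is a single rename: either the complete file appears in the store or nothing does.
        if (!fs.rename(tmpFile, storeFile)) {
            throw new java.io.IOException("commit failed for " + tmpFile);
        }
    }
}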
2024-12-08T11:20:05,831 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:05,831 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=13, startTime=1733656805703; duration=0sec 2024-12-08T11:20:05,832 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:05,833 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:05,834 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:05,834 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:05,834 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:05,834 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/e43786be13504d1991302eb944caba07, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/8bcbd0fa20d34245ac90026d9829d8ad, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/201fd910304e4cf18211e19120174e59] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=35.5 K 2024-12-08T11:20:05,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656865825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,835 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting e43786be13504d1991302eb944caba07, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1733656803487 2024-12-08T11:20:05,836 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8bcbd0fa20d34245ac90026d9829d8ad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733656803510 2024-12-08T11:20:05,838 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 201fd910304e4cf18211e19120174e59, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733656804276 2024-12-08T11:20:05,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656865828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656865828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656865832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656865835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741868_1044 (size=14541) 2024-12-08T11:20:05,865 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#30 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:05,866 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/77ef602f9d2d4ee2aaf7d99e011f5445 is 50, key is test_row_0/C:col10/1733656804276/Put/seqid=0 2024-12-08T11:20:05,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741869_1045 (size=12459) 2024-12-08T11:20:05,899 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/77ef602f9d2d4ee2aaf7d99e011f5445 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/77ef602f9d2d4ee2aaf7d99e011f5445 2024-12-08T11:20:05,916 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into 77ef602f9d2d4ee2aaf7d99e011f5445(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:05,916 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:05,916 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656805707; duration=0sec 2024-12-08T11:20:05,916 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:05,916 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:05,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656865937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656865943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656865945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656865945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:05,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656865948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-08T11:20:05,962 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:05,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-08T11:20:05,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:05,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:05,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:05,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:05,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:05,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,117 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-08T11:20:06,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:06,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:06,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:06,119 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656866144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656866152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656866151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656866152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656866156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-08T11:20:06,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/6d23ca4781a14b71ad7d137a9d73ee54 2024-12-08T11:20:06,272 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-08T11:20:06,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:06,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:06,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:06,275 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/cb304774268a4393ab85fece6c78cf1b is 50, key is test_row_0/B:col10/1733656805455/Put/seqid=0 2024-12-08T11:20:06,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741870_1046 (size=12151) 2024-12-08T11:20:06,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/cb304774268a4393ab85fece6c78cf1b 2024-12-08T11:20:06,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/c86d5bd7027f4bb786aa32c7654e76bb is 50, key is test_row_0/C:col10/1733656805455/Put/seqid=0 2024-12-08T11:20:06,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741871_1047 (size=12151) 2024-12-08T11:20:06,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/c86d5bd7027f4bb786aa32c7654e76bb 2024-12-08T11:20:06,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/6d23ca4781a14b71ad7d137a9d73ee54 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6d23ca4781a14b71ad7d137a9d73ee54 2024-12-08T11:20:06,385 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6d23ca4781a14b71ad7d137a9d73ee54, entries=200, sequenceid=156, filesize=14.2 K 2024-12-08T11:20:06,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/cb304774268a4393ab85fece6c78cf1b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/cb304774268a4393ab85fece6c78cf1b 2024-12-08T11:20:06,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/cb304774268a4393ab85fece6c78cf1b, entries=150, sequenceid=156, filesize=11.9 K 2024-12-08T11:20:06,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/c86d5bd7027f4bb786aa32c7654e76bb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c86d5bd7027f4bb786aa32c7654e76bb 2024-12-08T11:20:06,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c86d5bd7027f4bb786aa32c7654e76bb, entries=150, sequenceid=156, filesize=11.9 K 2024-12-08T11:20:06,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 64a616eb95ce0fb49283d502a9d694a3 in 618ms, sequenceid=156, compaction requested=false 2024-12-08T11:20:06,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:06,431 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-08T11:20:06,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:06,433 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T11:20:06,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:06,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:06,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:06,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:06,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:06,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:06,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/a4da11bc4b734cf89a713e365071ae7e is 50, key is test_row_0/A:col10/1733656805824/Put/seqid=0 2024-12-08T11:20:06,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:06,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:06,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741872_1048 (size=12151) 2024-12-08T11:20:06,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656866477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656866478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,485 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/a4da11bc4b734cf89a713e365071ae7e 2024-12-08T11:20:06,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656866481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656866483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656866485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/bb1fd38f3f994c6f8008c79a3fb4ded4 is 50, key is test_row_0/B:col10/1733656805824/Put/seqid=0 2024-12-08T11:20:06,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741873_1049 (size=12151) 2024-12-08T11:20:06,539 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/bb1fd38f3f994c6f8008c79a3fb4ded4 2024-12-08T11:20:06,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/9602a33cdbe343f991644f264ca0b151 is 50, key is test_row_0/C:col10/1733656805824/Put/seqid=0 2024-12-08T11:20:06,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656866586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,592 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656866588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656866588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656866591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656866592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741874_1050 (size=12151) 2024-12-08T11:20:06,601 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/9602a33cdbe343f991644f264ca0b151 2024-12-08T11:20:06,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/a4da11bc4b734cf89a713e365071ae7e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a4da11bc4b734cf89a713e365071ae7e 2024-12-08T11:20:06,624 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a4da11bc4b734cf89a713e365071ae7e, entries=150, sequenceid=176, filesize=11.9 K 2024-12-08T11:20:06,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/bb1fd38f3f994c6f8008c79a3fb4ded4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bb1fd38f3f994c6f8008c79a3fb4ded4 2024-12-08T11:20:06,638 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bb1fd38f3f994c6f8008c79a3fb4ded4, entries=150, sequenceid=176, filesize=11.9 K 2024-12-08T11:20:06,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/9602a33cdbe343f991644f264ca0b151 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9602a33cdbe343f991644f264ca0b151 2024-12-08T11:20:06,653 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9602a33cdbe343f991644f264ca0b151, entries=150, sequenceid=176, filesize=11.9 K 2024-12-08T11:20:06,655 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 64a616eb95ce0fb49283d502a9d694a3 in 222ms, sequenceid=176, compaction requested=true 2024-12-08T11:20:06,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:06,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
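The repeated RegionTooBusyException entries above are the region server blocking writes while the region's 512.0 K memstore limit is exceeded and a flush is still in progress. The stock HBase client retries this condition internally; the sketch below only makes the backoff pattern explicit and is an assumption about how a caller could handle it, not code from this test (the helper name and retry parameters are invented).

```java
import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public final class BusyRegionBackoff {
  // Retry a put with exponential backoff while the region reports it is over
  // its memstore limit. Depending on client retry settings the exception may
  // instead arrive wrapped in a retries-exhausted exception.
  static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long sleepMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(sleepMs);                 // let the in-flight flush drain the memstore
        sleepMs = Math.min(sleepMs * 2, 5_000);
      }
    }
  }
}
```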
2024-12-08T11:20:06,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-08T11:20:06,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-08T11:20:06,661 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-08T11:20:06,662 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0040 sec 2024-12-08T11:20:06,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.0160 sec 2024-12-08T11:20:06,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-08T11:20:06,758 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-08T11:20:06,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:06,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-08T11:20:06,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T11:20:06,764 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:06,765 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:06,766 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:06,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:20:06,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:06,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:06,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:06,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:06,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
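The pid=16/17 and pid=18/19 entries above trace the full lifecycle of an admin-triggered flush: the master stores a FlushTableProcedure, spawns a FlushRegionProcedure subprocedure per region, and the caller polls "Checking to see if procedure is done" until completion. A request of that shape can be issued with the standard Admin API; the sketch below is a plausible client call consistent with the "Client=jenkins ... flush TestAcidGuarantees" line, not the test's own code.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master reports the flush procedure complete,
      // matching the "Operation: FLUSH ... completed" line in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```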
2024-12-08T11:20:06,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:06,799 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:06,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/1ead987fc16641d5a53b72d55a1f7b99 is 50, key is test_row_0/A:col10/1733656806795/Put/seqid=0 2024-12-08T11:20:06,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656866820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656866820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656866825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656866822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656866828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741875_1051 (size=12151) 2024-12-08T11:20:06,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T11:20:06,921 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T11:20:06,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:06,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:06,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:06,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:06,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656866929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656866931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656866933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656866934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:06,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:06,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656866935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T11:20:07,077 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T11:20:07,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:07,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,079 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
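The "Over memstore limit=512.0 K" figure that recurs throughout this log is the per-region blocking threshold, derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values in the sketch below are assumptions chosen only to reproduce 512 K (128 K x the default multiplier of 4); the test's real configuration is not visible in this excerpt.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: a deliberately small flush size times the default multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block at " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}
```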
2024-12-08T11:20:07,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656867135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656867136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,139 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656867137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656867139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656867142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,233 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T11:20:07,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:07,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:07,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/1ead987fc16641d5a53b72d55a1f7b99 2024-12-08T11:20:07,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/0c918b4b206f43edbc8a6b0966bd2139 is 50, key is test_row_0/B:col10/1733656806795/Put/seqid=0 2024-12-08T11:20:07,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741876_1052 (size=12151) 2024-12-08T11:20:07,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T11:20:07,394 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T11:20:07,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:07,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656867440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656867441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656867446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656867447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656867447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T11:20:07,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:07,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,552 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:07,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,707 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T11:20:07,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:07,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,709 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:07,712 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/0c918b4b206f43edbc8a6b0966bd2139 2024-12-08T11:20:07,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:07,729 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/d8b6fd1e36df4c8589cc5d034134b82c is 50, key is test_row_0/C:col10/1733656806795/Put/seqid=0 2024-12-08T11:20:07,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741877_1053 (size=12151) 2024-12-08T11:20:07,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/d8b6fd1e36df4c8589cc5d034134b82c 2024-12-08T11:20:07,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/1ead987fc16641d5a53b72d55a1f7b99 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/1ead987fc16641d5a53b72d55a1f7b99 2024-12-08T11:20:07,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/1ead987fc16641d5a53b72d55a1f7b99, entries=150, sequenceid=197, filesize=11.9 K 2024-12-08T11:20:07,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/0c918b4b206f43edbc8a6b0966bd2139 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0c918b4b206f43edbc8a6b0966bd2139 2024-12-08T11:20:07,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0c918b4b206f43edbc8a6b0966bd2139, entries=150, sequenceid=197, filesize=11.9 K 2024-12-08T11:20:07,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/d8b6fd1e36df4c8589cc5d034134b82c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/d8b6fd1e36df4c8589cc5d034134b82c 2024-12-08T11:20:07,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/d8b6fd1e36df4c8589cc5d034134b82c, entries=150, sequenceid=197, filesize=11.9 K 2024-12-08T11:20:07,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 64a616eb95ce0fb49283d502a9d694a3 in 1018ms, sequenceid=197, 
compaction requested=true 2024-12-08T11:20:07,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:07,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:07,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:07,816 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:07,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:07,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:07,816 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:07,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:07,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:07,819 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51302 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:07,819 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48912 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:07,819 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:07,819 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:07,820 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:07,820 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:07,820 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6dd5ae5fc8554ea68ce824d77c7f1f11, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6d23ca4781a14b71ad7d137a9d73ee54, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a4da11bc4b734cf89a713e365071ae7e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/1ead987fc16641d5a53b72d55a1f7b99] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=50.1 K 2024-12-08T11:20:07,820 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3d124f7e89f14e4f9f32ead41ec7f4d2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/cb304774268a4393ab85fece6c78cf1b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bb1fd38f3f994c6f8008c79a3fb4ded4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0c918b4b206f43edbc8a6b0966bd2139] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=47.8 K 2024-12-08T11:20:07,821 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6dd5ae5fc8554ea68ce824d77c7f1f11, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733656804276 2024-12-08T11:20:07,821 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d124f7e89f14e4f9f32ead41ec7f4d2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733656804276 2024-12-08T11:20:07,821 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting cb304774268a4393ab85fece6c78cf1b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733656805452 2024-12-08T11:20:07,822 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d23ca4781a14b71ad7d137a9d73ee54, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733656805450 2024-12-08T11:20:07,822 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting bb1fd38f3f994c6f8008c79a3fb4ded4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733656805820 2024-12-08T11:20:07,823 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
a4da11bc4b734cf89a713e365071ae7e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733656805820 2024-12-08T11:20:07,823 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c918b4b206f43edbc8a6b0966bd2139, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733656806481 2024-12-08T11:20:07,823 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ead987fc16641d5a53b72d55a1f7b99, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733656806481 2024-12-08T11:20:07,842 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:07,843 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#40 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:07,843 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/4e0e99048778416f9a8f56ac9d3879ae is 50, key is test_row_0/A:col10/1733656806795/Put/seqid=0 2024-12-08T11:20:07,843 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/ac9ff35427094a83921aba92f7d2ea1f is 50, key is test_row_0/B:col10/1733656806795/Put/seqid=0 2024-12-08T11:20:07,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741878_1054 (size=12595) 2024-12-08T11:20:07,865 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-08T11:20:07,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:07,866 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T11:20:07,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:07,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741879_1055 (size=12595) 2024-12-08T11:20:07,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:07,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:07,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:07,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:07,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:07,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T11:20:07,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/fce719e2bf744415bfeb7322ab54dfeb is 50, key is test_row_0/A:col10/1733656806820/Put/seqid=0 2024-12-08T11:20:07,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741880_1056 (size=12151) 2024-12-08T11:20:07,909 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/fce719e2bf744415bfeb7322ab54dfeb 2024-12-08T11:20:07,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/29680b311e644785ab5a57f4390faefa is 50, key is test_row_0/B:col10/1733656806820/Put/seqid=0 2024-12-08T11:20:07,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741881_1057 (size=12151) 2024-12-08T11:20:07,952 INFO 
[RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/29680b311e644785ab5a57f4390faefa 2024-12-08T11:20:07,956 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:07,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:07,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/a323098ff7bc48ddab81d666e7efcc2f is 50, key is test_row_0/C:col10/1733656806820/Put/seqid=0 2024-12-08T11:20:07,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656867992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656867993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:07,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:07,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656867995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656867995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656867997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741882_1058 (size=12151) 2024-12-08T11:20:08,008 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/a323098ff7bc48ddab81d666e7efcc2f 2024-12-08T11:20:08,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/fce719e2bf744415bfeb7322ab54dfeb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fce719e2bf744415bfeb7322ab54dfeb 2024-12-08T11:20:08,025 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fce719e2bf744415bfeb7322ab54dfeb, entries=150, sequenceid=212, filesize=11.9 K 2024-12-08T11:20:08,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/29680b311e644785ab5a57f4390faefa as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/29680b311e644785ab5a57f4390faefa 2024-12-08T11:20:08,036 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/29680b311e644785ab5a57f4390faefa, entries=150, sequenceid=212, filesize=11.9 K 2024-12-08T11:20:08,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/a323098ff7bc48ddab81d666e7efcc2f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a323098ff7bc48ddab81d666e7efcc2f 2024-12-08T11:20:08,045 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a323098ff7bc48ddab81d666e7efcc2f, entries=150, sequenceid=212, filesize=11.9 K 2024-12-08T11:20:08,047 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 64a616eb95ce0fb49283d502a9d694a3 in 181ms, sequenceid=212, compaction requested=true 2024-12-08T11:20:08,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:08,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:08,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-08T11:20:08,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-08T11:20:08,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-08T11:20:08,052 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2830 sec 2024-12-08T11:20:08,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.2930 sec 2024-12-08T11:20:08,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:08,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T11:20:08,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:08,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:08,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:08,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:08,105 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:08,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:08,112 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/10af5c4d365f4be79208df88c0aa95f7 is 50, key is test_row_0/A:col10/1733656807993/Put/seqid=0 2024-12-08T11:20:08,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656868122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656868125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656868126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656868127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656868129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741883_1059 (size=14541) 2024-12-08T11:20:08,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/10af5c4d365f4be79208df88c0aa95f7 2024-12-08T11:20:08,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/be33ef0c7d874b5ab2a218ce53453c7f is 50, key is test_row_0/B:col10/1733656807993/Put/seqid=0 2024-12-08T11:20:08,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741884_1060 (size=12151) 2024-12-08T11:20:08,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/be33ef0c7d874b5ab2a218ce53453c7f 2024-12-08T11:20:08,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/b2b60fca242640df8c1180ce867fdaba is 50, key is test_row_0/C:col10/1733656807993/Put/seqid=0 2024-12-08T11:20:08,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741885_1061 (size=12151) 2024-12-08T11:20:08,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656868232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656868232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656868234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656868234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656868238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,271 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/4e0e99048778416f9a8f56ac9d3879ae as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4e0e99048778416f9a8f56ac9d3879ae 2024-12-08T11:20:08,278 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/ac9ff35427094a83921aba92f7d2ea1f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/ac9ff35427094a83921aba92f7d2ea1f 2024-12-08T11:20:08,284 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into 4e0e99048778416f9a8f56ac9d3879ae(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:08,284 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3:
2024-12-08T11:20:08,284 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=12, startTime=1733656807816; duration=0sec
2024-12-08T11:20:08,284 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-12-08T11:20:08,284 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A
2024-12-08T11:20:08,285 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking
2024-12-08T11:20:08,288 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61063 starting at candidate #0 after considering 6 permutations with 6 in ratio
2024-12-08T11:20:08,289 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files)
2024-12-08T11:20:08,289 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.
2024-12-08T11:20:08,289 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/77ef602f9d2d4ee2aaf7d99e011f5445, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c86d5bd7027f4bb786aa32c7654e76bb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9602a33cdbe343f991644f264ca0b151, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/d8b6fd1e36df4c8589cc5d034134b82c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a323098ff7bc48ddab81d666e7efcc2f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=59.6 K
2024-12-08T11:20:08,290 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 77ef602f9d2d4ee2aaf7d99e011f5445, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1733656804276
2024-12-08T11:20:08,290 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting c86d5bd7027f4bb786aa32c7654e76bb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733656805452
2024-12-08T11:20:08,291 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9602a33cdbe343f991644f264ca0b151, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733656805820 2024-12-08T11:20:08,291 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8b6fd1e36df4c8589cc5d034134b82c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733656806481 2024-12-08T11:20:08,292 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into ac9ff35427094a83921aba92f7d2ea1f(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:08,292 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:08,292 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=12, startTime=1733656807816; duration=0sec 2024-12-08T11:20:08,292 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:08,292 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:08,293 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting a323098ff7bc48ddab81d666e7efcc2f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733656806816 2024-12-08T11:20:08,311 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#47 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:08,311 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/26494f189ae94873a96f45f1f5519e7f is 50, key is test_row_0/C:col10/1733656806820/Put/seqid=0 2024-12-08T11:20:08,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741886_1062 (size=12629) 2024-12-08T11:20:08,358 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/26494f189ae94873a96f45f1f5519e7f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/26494f189ae94873a96f45f1f5519e7f 2024-12-08T11:20:08,371 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into 26494f189ae94873a96f45f1f5519e7f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:08,371 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:08,371 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=11, startTime=1733656807816; duration=0sec 2024-12-08T11:20:08,371 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:08,371 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:08,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656868441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656868442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656868442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656868442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656868445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/b2b60fca242640df8c1180ce867fdaba 2024-12-08T11:20:08,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/10af5c4d365f4be79208df88c0aa95f7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/10af5c4d365f4be79208df88c0aa95f7 2024-12-08T11:20:08,661 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/10af5c4d365f4be79208df88c0aa95f7, entries=200, sequenceid=234, filesize=14.2 K 2024-12-08T11:20:08,662 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/be33ef0c7d874b5ab2a218ce53453c7f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/be33ef0c7d874b5ab2a218ce53453c7f 2024-12-08T11:20:08,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/be33ef0c7d874b5ab2a218ce53453c7f, entries=150, sequenceid=234, filesize=11.9 K 2024-12-08T11:20:08,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/b2b60fca242640df8c1180ce867fdaba as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/b2b60fca242640df8c1180ce867fdaba 2024-12-08T11:20:08,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/b2b60fca242640df8c1180ce867fdaba, entries=150, sequenceid=234, filesize=11.9 K 2024-12-08T11:20:08,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 64a616eb95ce0fb49283d502a9d694a3 in 585ms, sequenceid=234, compaction requested=true 2024-12-08T11:20:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:08,689 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:08,689 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:08,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:08,692 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:08,692 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 39287 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:08,692 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:08,692 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:08,692 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:08,692 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:08,693 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/ac9ff35427094a83921aba92f7d2ea1f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/29680b311e644785ab5a57f4390faefa, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/be33ef0c7d874b5ab2a218ce53453c7f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=36.0 K 2024-12-08T11:20:08,693 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4e0e99048778416f9a8f56ac9d3879ae, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fce719e2bf744415bfeb7322ab54dfeb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/10af5c4d365f4be79208df88c0aa95f7] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=38.4 K 2024-12-08T11:20:08,693 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ac9ff35427094a83921aba92f7d2ea1f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733656806481 2024-12-08T11:20:08,694 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e0e99048778416f9a8f56ac9d3879ae, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733656806481 2024-12-08T11:20:08,695 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting fce719e2bf744415bfeb7322ab54dfeb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733656806816 2024-12-08T11:20:08,695 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting 10af5c4d365f4be79208df88c0aa95f7, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733656807991 2024-12-08T11:20:08,696 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 29680b311e644785ab5a57f4390faefa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733656806816 2024-12-08T11:20:08,696 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting be33ef0c7d874b5ab2a218ce53453c7f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733656807991 2024-12-08T11:20:08,720 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#48 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:08,720 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/23f3ccda7b5b4774ae59d5a99442c881 is 50, key is test_row_0/A:col10/1733656807993/Put/seqid=0 2024-12-08T11:20:08,733 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#49 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:08,733 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/a4a0d0f099124354ae9f9a8fa5490e2b is 50, key is test_row_0/B:col10/1733656807993/Put/seqid=0 2024-12-08T11:20:08,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:08,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T11:20:08,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:08,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:08,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:08,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:08,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:08,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:08,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741887_1063 (size=12697) 2024-12-08T11:20:08,792 DEBUG 
[RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/23f3ccda7b5b4774ae59d5a99442c881 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/23f3ccda7b5b4774ae59d5a99442c881 2024-12-08T11:20:08,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/bc40e21cb69d4ead8631e78df287a613 is 50, key is test_row_0/A:col10/1733656808119/Put/seqid=0 2024-12-08T11:20:08,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656868780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656868784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656868782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656868790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656868792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,801 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into 23f3ccda7b5b4774ae59d5a99442c881(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:08,801 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:08,801 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=13, startTime=1733656808689; duration=0sec 2024-12-08T11:20:08,802 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:08,802 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:08,802 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-08T11:20:08,804 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T11:20:08,804 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T11:20:08,804 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
because compaction request was cancelled 2024-12-08T11:20:08,804 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:08,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741888_1064 (size=12697) 2024-12-08T11:20:08,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741889_1065 (size=14541) 2024-12-08T11:20:08,827 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/bc40e21cb69d4ead8631e78df287a613 2024-12-08T11:20:08,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/18512f634e0547a5b90e55685d5cfaf8 is 50, key is test_row_0/B:col10/1733656808119/Put/seqid=0 2024-12-08T11:20:08,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741890_1066 (size=12151) 2024-12-08T11:20:08,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-08T11:20:08,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/18512f634e0547a5b90e55685d5cfaf8 2024-12-08T11:20:08,869 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-08T11:20:08,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:08,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-08T11:20:08,875 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:08,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T11:20:08,876 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:08,876 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:08,886 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/1dbe2c91838949269bd3c7a13b4f1e22 is 50, key is test_row_0/C:col10/1733656808119/Put/seqid=0 2024-12-08T11:20:08,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656868898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656868900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741891_1067 (size=12151) 2024-12-08T11:20:08,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656868901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/1dbe2c91838949269bd3c7a13b4f1e22 2024-12-08T11:20:08,905 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656868902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:08,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656868903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:08,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/bc40e21cb69d4ead8631e78df287a613 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bc40e21cb69d4ead8631e78df287a613 2024-12-08T11:20:08,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bc40e21cb69d4ead8631e78df287a613, entries=200, sequenceid=253, filesize=14.2 K 2024-12-08T11:20:08,927 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/18512f634e0547a5b90e55685d5cfaf8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/18512f634e0547a5b90e55685d5cfaf8 2024-12-08T11:20:08,938 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/18512f634e0547a5b90e55685d5cfaf8, entries=150, sequenceid=253, filesize=11.9 K 2024-12-08T11:20:08,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/1dbe2c91838949269bd3c7a13b4f1e22 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1dbe2c91838949269bd3c7a13b4f1e22 2024-12-08T11:20:08,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1dbe2c91838949269bd3c7a13b4f1e22, entries=150, sequenceid=253, filesize=11.9 K 2024-12-08T11:20:08,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 64a616eb95ce0fb49283d502a9d694a3 in 216ms, sequenceid=253, compaction requested=true 2024-12-08T11:20:08,967 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:08,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:08,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:08,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:08,967 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-08T11:20:08,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:08,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:08,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T11:20:08,969 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T11:20:08,969 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T11:20:08,969 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
because compaction request was cancelled 2024-12-08T11:20:08,969 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:08,969 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-12-08T11:20:08,970 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-08T11:20:08,970 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-08T11:20:08,970 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. because compaction request was cancelled 2024-12-08T11:20:08,970 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:08,970 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:08,972 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:08,972 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:08,972 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:08,972 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/26494f189ae94873a96f45f1f5519e7f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/b2b60fca242640df8c1180ce867fdaba, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1dbe2c91838949269bd3c7a13b4f1e22] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=36.1 K 2024-12-08T11:20:08,973 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26494f189ae94873a96f45f1f5519e7f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1733656806816 2024-12-08T11:20:08,973 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2b60fca242640df8c1180ce867fdaba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733656807991 2024-12-08T11:20:08,974 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dbe2c91838949269bd3c7a13b4f1e22, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733656808119 2024-12-08T11:20:08,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T11:20:08,991 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#53 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:08,992 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/695e850de61d4c329c2ae1f680f2edef is 50, key is test_row_0/C:col10/1733656808119/Put/seqid=0 2024-12-08T11:20:09,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741892_1068 (size=12731) 2024-12-08T11:20:09,030 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-08T11:20:09,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:09,032 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:20:09,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:09,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:09,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:09,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:09,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:09,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:09,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7fd0b1765deb4eb3baed3bf5f244910a is 50, key is test_row_0/A:col10/1733656808790/Put/seqid=0 2024-12-08T11:20:09,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741893_1069 (size=12301) 2024-12-08T11:20:09,055 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7fd0b1765deb4eb3baed3bf5f244910a 2024-12-08T11:20:09,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/66a2a00c6a6648749353a496eafda6ba is 50, key is test_row_0/B:col10/1733656808790/Put/seqid=0 2024-12-08T11:20:09,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
as already flushing 2024-12-08T11:20:09,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:09,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741894_1070 (size=12301) 2024-12-08T11:20:09,112 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/66a2a00c6a6648749353a496eafda6ba 2024-12-08T11:20:09,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/cb87fc78e78f46cb8aea3cece2be5d70 is 50, key is test_row_0/C:col10/1733656808790/Put/seqid=0 2024-12-08T11:20:09,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741895_1071 (size=12301) 2024-12-08T11:20:09,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T11:20:09,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656869165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656869166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656869214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656869214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656869214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,223 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/a4a0d0f099124354ae9f9a8fa5490e2b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/a4a0d0f099124354ae9f9a8fa5490e2b 2024-12-08T11:20:09,232 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into a4a0d0f099124354ae9f9a8fa5490e2b(size=12.4 K), total size for store is 24.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:09,232 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:09,232 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=13, startTime=1733656808689; duration=0sec 2024-12-08T11:20:09,232 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:09,232 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:09,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656869316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656869316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656869320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656869320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656869321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,423 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/695e850de61d4c329c2ae1f680f2edef as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/695e850de61d4c329c2ae1f680f2edef 2024-12-08T11:20:09,431 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into 695e850de61d4c329c2ae1f680f2edef(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:09,431 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:09,431 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656808968; duration=0sec 2024-12-08T11:20:09,431 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:09,431 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:09,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T11:20:09,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656869520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656869519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656869523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656869523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656869524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,534 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/cb87fc78e78f46cb8aea3cece2be5d70 2024-12-08T11:20:09,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7fd0b1765deb4eb3baed3bf5f244910a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fd0b1765deb4eb3baed3bf5f244910a 2024-12-08T11:20:09,552 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fd0b1765deb4eb3baed3bf5f244910a, entries=150, sequenceid=274, filesize=12.0 K 2024-12-08T11:20:09,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/66a2a00c6a6648749353a496eafda6ba as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/66a2a00c6a6648749353a496eafda6ba 2024-12-08T11:20:09,562 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/66a2a00c6a6648749353a496eafda6ba, entries=150, sequenceid=274, filesize=12.0 K 2024-12-08T11:20:09,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/cb87fc78e78f46cb8aea3cece2be5d70 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/cb87fc78e78f46cb8aea3cece2be5d70 2024-12-08T11:20:09,570 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/cb87fc78e78f46cb8aea3cece2be5d70, entries=150, sequenceid=274, filesize=12.0 K 2024-12-08T11:20:09,572 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 64a616eb95ce0fb49283d502a9d694a3 in 540ms, sequenceid=274, compaction requested=true 2024-12-08T11:20:09,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:09,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:09,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-08T11:20:09,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-08T11:20:09,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-08T11:20:09,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 698 msec 2024-12-08T11:20:09,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 707 msec 2024-12-08T11:20:09,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:09,829 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-08T11:20:09,829 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:09,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:09,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:09,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:09,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:09,830 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:09,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/e0de9024f90c4c01a6de493e3e4765c6 is 50, key is test_row_0/A:col10/1733656809164/Put/seqid=0 2024-12-08T11:20:09,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741896_1072 (size=12301) 2024-12-08T11:20:09,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/e0de9024f90c4c01a6de493e3e4765c6 2024-12-08T11:20:09,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/98cac4abf20b43ffa24dc49a822f0769 is 50, key is test_row_0/B:col10/1733656809164/Put/seqid=0 2024-12-08T11:20:09,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656869872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656869874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656869874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656869877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656869879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741897_1073 (size=12301) 2024-12-08T11:20:09,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/98cac4abf20b43ffa24dc49a822f0769 2024-12-08T11:20:09,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/61a7eac286444630bd337baa9ae886a6 is 50, key is test_row_0/C:col10/1733656809164/Put/seqid=0 2024-12-08T11:20:09,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741898_1074 (size=12301) 2024-12-08T11:20:09,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/61a7eac286444630bd337baa9ae886a6 2024-12-08T11:20:09,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/e0de9024f90c4c01a6de493e3e4765c6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e0de9024f90c4c01a6de493e3e4765c6 2024-12-08T11:20:09,962 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e0de9024f90c4c01a6de493e3e4765c6, 
entries=150, sequenceid=295, filesize=12.0 K 2024-12-08T11:20:09,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/98cac4abf20b43ffa24dc49a822f0769 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/98cac4abf20b43ffa24dc49a822f0769 2024-12-08T11:20:09,973 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/98cac4abf20b43ffa24dc49a822f0769, entries=150, sequenceid=295, filesize=12.0 K 2024-12-08T11:20:09,978 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/61a7eac286444630bd337baa9ae886a6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/61a7eac286444630bd337baa9ae886a6 2024-12-08T11:20:09,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-08T11:20:09,980 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-08T11:20:09,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656869978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,983 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:09,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-08T11:20:09,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T11:20:09,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,985 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:09,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656869983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,986 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:09,986 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:09,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656869984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656869983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:09,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656869985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:09,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/61a7eac286444630bd337baa9ae886a6, entries=150, sequenceid=295, filesize=12.0 K 2024-12-08T11:20:09,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 64a616eb95ce0fb49283d502a9d694a3 in 161ms, sequenceid=295, compaction requested=true 2024-12-08T11:20:09,991 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:09,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:09,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:09,991 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:09,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:09,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:09,991 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:09,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction 
store size is 3 2024-12-08T11:20:09,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:09,993 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:09,993 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:09,993 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:09,993 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/a4a0d0f099124354ae9f9a8fa5490e2b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/18512f634e0547a5b90e55685d5cfaf8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/66a2a00c6a6648749353a496eafda6ba, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/98cac4abf20b43ffa24dc49a822f0769] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=48.3 K 2024-12-08T11:20:09,993 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51840 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:09,993 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:09,994 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:09,994 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/23f3ccda7b5b4774ae59d5a99442c881, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bc40e21cb69d4ead8631e78df287a613, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fd0b1765deb4eb3baed3bf5f244910a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e0de9024f90c4c01a6de493e3e4765c6] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=50.6 K 2024-12-08T11:20:09,994 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 23f3ccda7b5b4774ae59d5a99442c881, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733656807991 2024-12-08T11:20:09,994 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a4a0d0f099124354ae9f9a8fa5490e2b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1733656807991 2024-12-08T11:20:09,995 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 18512f634e0547a5b90e55685d5cfaf8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733656808119 2024-12-08T11:20:09,996 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc40e21cb69d4ead8631e78df287a613, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733656808119 2024-12-08T11:20:09,996 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 66a2a00c6a6648749353a496eafda6ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733656808775 2024-12-08T11:20:09,996 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7fd0b1765deb4eb3baed3bf5f244910a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733656808775 2024-12-08T11:20:09,997 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 98cac4abf20b43ffa24dc49a822f0769, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733656809162 2024-12-08T11:20:09,997 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0de9024f90c4c01a6de493e3e4765c6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733656809162 2024-12-08T11:20:10,013 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#60 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:10,014 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/923b17581e234b718c6baa22c0cdb6d9 is 50, key is test_row_0/A:col10/1733656809164/Put/seqid=0 2024-12-08T11:20:10,018 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#61 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:10,018 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/b5c48dbb004844e786c0e41874fa6657 is 50, key is test_row_0/B:col10/1733656809164/Put/seqid=0 2024-12-08T11:20:10,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741899_1075 (size=12983) 2024-12-08T11:20:10,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741900_1076 (size=12983) 2024-12-08T11:20:10,040 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/923b17581e234b718c6baa22c0cdb6d9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/923b17581e234b718c6baa22c0cdb6d9 2024-12-08T11:20:10,056 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into 923b17581e234b718c6baa22c0cdb6d9(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:10,056 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:10,056 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=12, startTime=1733656809991; duration=0sec 2024-12-08T11:20:10,056 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:10,056 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:10,057 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:10,060 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:10,060 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:10,060 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:10,060 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/695e850de61d4c329c2ae1f680f2edef, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/cb87fc78e78f46cb8aea3cece2be5d70, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/61a7eac286444630bd337baa9ae886a6] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=36.5 K 2024-12-08T11:20:10,062 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 695e850de61d4c329c2ae1f680f2edef, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1733656808119 2024-12-08T11:20:10,063 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb87fc78e78f46cb8aea3cece2be5d70, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733656808775 2024-12-08T11:20:10,064 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61a7eac286444630bd337baa9ae886a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733656809162 2024-12-08T11:20:10,077 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#62 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:10,078 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/6578d626933b4451b8324c44ee9ae071 is 50, key is test_row_0/C:col10/1733656809164/Put/seqid=0 2024-12-08T11:20:10,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T11:20:10,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741901_1077 (size=12983) 2024-12-08T11:20:10,142 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-08T11:20:10,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:10,143 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T11:20:10,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:10,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:10,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/2cf2452e39094e7190245297f7e7d9ee is 50, key is test_row_0/A:col10/1733656809849/Put/seqid=0 
2024-12-08T11:20:10,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:10,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:10,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741902_1078 (size=12301) 2024-12-08T11:20:10,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656870215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656870215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656870217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656870219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656870221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T11:20:10,328 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656870324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656870325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656870325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656870325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656870325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,439 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/b5c48dbb004844e786c0e41874fa6657 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b5c48dbb004844e786c0e41874fa6657 2024-12-08T11:20:10,450 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into b5c48dbb004844e786c0e41874fa6657(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:10,451 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:10,451 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=12, startTime=1733656809991; duration=0sec 2024-12-08T11:20:10,451 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:10,451 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:10,520 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/6578d626933b4451b8324c44ee9ae071 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6578d626933b4451b8324c44ee9ae071 2024-12-08T11:20:10,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656870532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656870533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656870534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,537 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into 6578d626933b4451b8324c44ee9ae071(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:10,538 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:10,538 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656809991; duration=0sec 2024-12-08T11:20:10,538 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:10,538 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:10,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656870537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656870538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T11:20:10,599 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/2cf2452e39094e7190245297f7e7d9ee 2024-12-08T11:20:10,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/030ac886b3264b8daa20204b57ab8af1 is 50, key is test_row_0/B:col10/1733656809849/Put/seqid=0 2024-12-08T11:20:10,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741903_1079 (size=12301) 2024-12-08T11:20:10,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656870836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656870838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656870836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656870842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:10,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:10,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656870846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:11,074 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/030ac886b3264b8daa20204b57ab8af1 2024-12-08T11:20:11,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T11:20:11,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/6879cb6790e94bf8822d70edeba59a8a is 50, key is test_row_0/C:col10/1733656809849/Put/seqid=0 2024-12-08T11:20:11,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741904_1080 (size=12301) 2024-12-08T11:20:11,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656871342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:11,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656871343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:11,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656871344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:11,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656871349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:11,354 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656871354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:11,511 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/6879cb6790e94bf8822d70edeba59a8a 2024-12-08T11:20:11,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/2cf2452e39094e7190245297f7e7d9ee as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/2cf2452e39094e7190245297f7e7d9ee 2024-12-08T11:20:11,528 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/2cf2452e39094e7190245297f7e7d9ee, entries=150, sequenceid=313, filesize=12.0 K 2024-12-08T11:20:11,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/030ac886b3264b8daa20204b57ab8af1 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/030ac886b3264b8daa20204b57ab8af1 2024-12-08T11:20:11,537 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/030ac886b3264b8daa20204b57ab8af1, entries=150, sequenceid=313, filesize=12.0 K 2024-12-08T11:20:11,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/6879cb6790e94bf8822d70edeba59a8a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6879cb6790e94bf8822d70edeba59a8a 2024-12-08T11:20:11,546 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6879cb6790e94bf8822d70edeba59a8a, entries=150, sequenceid=313, filesize=12.0 K 2024-12-08T11:20:11,548 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 64a616eb95ce0fb49283d502a9d694a3 in 1405ms, sequenceid=313, compaction requested=false 2024-12-08T11:20:11,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:11,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:11,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-08T11:20:11,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-08T11:20:11,553 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-08T11:20:11,553 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5640 sec 2024-12-08T11:20:11,556 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.5720 sec 2024-12-08T11:20:12,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-08T11:20:12,093 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-08T11:20:12,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:12,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-08T11:20:12,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T11:20:12,100 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:12,102 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:12,102 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:12,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T11:20:12,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,263 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-08T11:20:12,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:12,264 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T11:20:12,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:12,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:12,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:12,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:12,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:12,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:12,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/9a4123b9b7dc4716aac5e486981b77a0 is 50, key is test_row_0/A:col10/1733656810219/Put/seqid=0 2024-12-08T11:20:12,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741905_1081 (size=12301) 
2024-12-08T11:20:12,297 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/9a4123b9b7dc4716aac5e486981b77a0 2024-12-08T11:20:12,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/3f12321c41fc43bc994255f01b35c178 is 50, key is test_row_0/B:col10/1733656810219/Put/seqid=0 2024-12-08T11:20:12,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:12,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:12,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741906_1082 (size=12301) 2024-12-08T11:20:12,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656872370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656872373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656872373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656872376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656872374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T11:20:12,484 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656872478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656872482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656872483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656872483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,489 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656872483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656872690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656872692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656872693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656872693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,695 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656872693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:12,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T11:20:12,760 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/3f12321c41fc43bc994255f01b35c178 2024-12-08T11:20:12,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/7183f89cbe724cd18be8876d42fc817b is 50, key is test_row_0/C:col10/1733656810219/Put/seqid=0 2024-12-08T11:20:12,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741907_1083 (size=12301) 2024-12-08T11:20:12,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:12,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656872997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656872998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656872999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656872999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656873001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,196 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=334 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/7183f89cbe724cd18be8876d42fc817b 2024-12-08T11:20:13,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T11:20:13,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/9a4123b9b7dc4716aac5e486981b77a0 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/9a4123b9b7dc4716aac5e486981b77a0 2024-12-08T11:20:13,220 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/9a4123b9b7dc4716aac5e486981b77a0, entries=150, sequenceid=334, filesize=12.0 K 2024-12-08T11:20:13,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/3f12321c41fc43bc994255f01b35c178 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3f12321c41fc43bc994255f01b35c178 2024-12-08T11:20:13,229 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3f12321c41fc43bc994255f01b35c178, entries=150, sequenceid=334, filesize=12.0 K 2024-12-08T11:20:13,230 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/7183f89cbe724cd18be8876d42fc817b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/7183f89cbe724cd18be8876d42fc817b 2024-12-08T11:20:13,236 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/7183f89cbe724cd18be8876d42fc817b, entries=150, sequenceid=334, filesize=12.0 K 2024-12-08T11:20:13,238 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 64a616eb95ce0fb49283d502a9d694a3 in 974ms, sequenceid=334, compaction requested=true 2024-12-08T11:20:13,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:13,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:13,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-08T11:20:13,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-08T11:20:13,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-08T11:20:13,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1380 sec 2024-12-08T11:20:13,244 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.1470 sec 2024-12-08T11:20:13,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-08T11:20:13,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:13,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:13,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:13,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:13,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:13,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-12-08T11:20:13,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:13,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7f78fbce47d24f0db6517c92b9a9aa13 is 50, key is test_row_0/A:col10/1733656812373/Put/seqid=0 2024-12-08T11:20:13,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656873527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656873528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656873529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656873529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656873530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741908_1084 (size=14741) 2024-12-08T11:20:13,634 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656873632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656873634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656873635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656873636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656873636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656873837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656873838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656873839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656873840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656873841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:13,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7f78fbce47d24f0db6517c92b9a9aa13 2024-12-08T11:20:13,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/392fba09ecbf4fbbb3181724dba3cc3c is 50, key is test_row_0/B:col10/1733656812373/Put/seqid=0 2024-12-08T11:20:13,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741909_1085 (size=12301) 2024-12-08T11:20:13,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/392fba09ecbf4fbbb3181724dba3cc3c 2024-12-08T11:20:14,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/6e7d0dd93a79414ba1acbc53b7e40076 is 50, key is test_row_0/C:col10/1733656812373/Put/seqid=0 2024-12-08T11:20:14,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741910_1086 (size=12301) 2024-12-08T11:20:14,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/6e7d0dd93a79414ba1acbc53b7e40076 2024-12-08T11:20:14,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/7f78fbce47d24f0db6517c92b9a9aa13 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7f78fbce47d24f0db6517c92b9a9aa13 2024-12-08T11:20:14,064 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7f78fbce47d24f0db6517c92b9a9aa13, entries=200, sequenceid=352, filesize=14.4 K 2024-12-08T11:20:14,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/392fba09ecbf4fbbb3181724dba3cc3c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/392fba09ecbf4fbbb3181724dba3cc3c 2024-12-08T11:20:14,073 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/392fba09ecbf4fbbb3181724dba3cc3c, entries=150, sequenceid=352, filesize=12.0 K 2024-12-08T11:20:14,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/6e7d0dd93a79414ba1acbc53b7e40076 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6e7d0dd93a79414ba1acbc53b7e40076 2024-12-08T11:20:14,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6e7d0dd93a79414ba1acbc53b7e40076, entries=150, sequenceid=352, filesize=12.0 K 2024-12-08T11:20:14,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 64a616eb95ce0fb49283d502a9d694a3 in 582ms, sequenceid=352, compaction requested=true 2024-12-08T11:20:14,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:14,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:14,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:14,087 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:14,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:14,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:14,087 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:14,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:14,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:14,089 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:14,089 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:14,089 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,090 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b5c48dbb004844e786c0e41874fa6657, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/030ac886b3264b8daa20204b57ab8af1, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3f12321c41fc43bc994255f01b35c178, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/392fba09ecbf4fbbb3181724dba3cc3c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=48.7 K 2024-12-08T11:20:14,090 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:14,090 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:14,090 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:14,090 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/923b17581e234b718c6baa22c0cdb6d9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/2cf2452e39094e7190245297f7e7d9ee, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/9a4123b9b7dc4716aac5e486981b77a0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7f78fbce47d24f0db6517c92b9a9aa13] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=51.1 K 2024-12-08T11:20:14,091 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b5c48dbb004844e786c0e41874fa6657, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733656809162 2024-12-08T11:20:14,091 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 923b17581e234b718c6baa22c0cdb6d9, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733656809162 2024-12-08T11:20:14,092 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 030ac886b3264b8daa20204b57ab8af1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733656809849 2024-12-08T11:20:14,092 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cf2452e39094e7190245297f7e7d9ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733656809849 2024-12-08T11:20:14,092 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f12321c41fc43bc994255f01b35c178, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733656810215 2024-12-08T11:20:14,092 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a4123b9b7dc4716aac5e486981b77a0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733656810215 2024-12-08T11:20:14,093 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 392fba09ecbf4fbbb3181724dba3cc3c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733656812373 2024-12-08T11:20:14,094 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f78fbce47d24f0db6517c92b9a9aa13, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733656812370 2024-12-08T11:20:14,119 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#72 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:14,120 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/70ed2c5e061c469e9427ff23720f2c25 is 50, key is test_row_0/B:col10/1733656812373/Put/seqid=0 2024-12-08T11:20:14,124 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#73 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:14,125 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/8bf91cb5c3704eaf83223953cb0e398d is 50, key is test_row_0/A:col10/1733656812373/Put/seqid=0 2024-12-08T11:20:14,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:20:14,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:14,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:14,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:14,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:14,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:14,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:14,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:14,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656874171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741911_1087 (size=13119) 2024-12-08T11:20:14,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656874172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656874173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656874175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656874175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741912_1088 (size=13119) 2024-12-08T11:20:14,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/e8284af994874039956e55c871c5c496 is 50, key is test_row_0/A:col10/1733656813528/Put/seqid=0 2024-12-08T11:20:14,197 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/70ed2c5e061c469e9427ff23720f2c25 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/70ed2c5e061c469e9427ff23720f2c25 2024-12-08T11:20:14,197 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/8bf91cb5c3704eaf83223953cb0e398d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/8bf91cb5c3704eaf83223953cb0e398d 2024-12-08T11:20:14,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-08T11:20:14,205 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: 
default:TestAcidGuarantees, procId: 24 completed 2024-12-08T11:20:14,206 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into 70ed2c5e061c469e9427ff23720f2c25(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:14,206 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:14,206 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=12, startTime=1733656814087; duration=0sec 2024-12-08T11:20:14,206 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:14,206 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:14,206 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:14,207 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into 8bf91cb5c3704eaf83223953cb0e398d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:14,207 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:14,207 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:14,207 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=12, startTime=1733656814087; duration=0sec 2024-12-08T11:20:14,207 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:14,207 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:14,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-12-08T11:20:14,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T11:20:14,209 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:14,210 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:14,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:14,211 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:14,211 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:14,211 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:14,211 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6578d626933b4451b8324c44ee9ae071, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6879cb6790e94bf8822d70edeba59a8a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/7183f89cbe724cd18be8876d42fc817b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6e7d0dd93a79414ba1acbc53b7e40076] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=48.7 K 2024-12-08T11:20:14,212 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 6578d626933b4451b8324c44ee9ae071, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1733656809162 2024-12-08T11:20:14,212 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 6879cb6790e94bf8822d70edeba59a8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1733656809849 2024-12-08T11:20:14,213 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7183f89cbe724cd18be8876d42fc817b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=334, earliestPutTs=1733656810215 2024-12-08T11:20:14,214 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e7d0dd93a79414ba1acbc53b7e40076, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733656812373 2024-12-08T11:20:14,235 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#75 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:14,236 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/ec0e5559cdbb40aa96fabeff1765d925 is 50, key is test_row_0/C:col10/1733656812373/Put/seqid=0 2024-12-08T11:20:14,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741913_1089 (size=12301) 2024-12-08T11:20:14,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/e8284af994874039956e55c871c5c496 2024-12-08T11:20:14,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741914_1090 (size=13119) 2024-12-08T11:20:14,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/b8a2a45b9f084398b186dd30294704e2 is 50, key is test_row_0/B:col10/1733656813528/Put/seqid=0 2024-12-08T11:20:14,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656874286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656874287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656874287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,288 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/ec0e5559cdbb40aa96fabeff1765d925 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ec0e5559cdbb40aa96fabeff1765d925 2024-12-08T11:20:14,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656874287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,291 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656874288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,297 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into ec0e5559cdbb40aa96fabeff1765d925(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:14,297 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:14,297 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=12, startTime=1733656814087; duration=0sec 2024-12-08T11:20:14,297 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:14,297 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:14,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T11:20:14,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741915_1091 (size=12301) 2024-12-08T11:20:14,317 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/b8a2a45b9f084398b186dd30294704e2 2024-12-08T11:20:14,327 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/58fa07a08cf346339fb76a7a35f1664a is 50, key is test_row_0/C:col10/1733656813528/Put/seqid=0 2024-12-08T11:20:14,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741916_1092 (size=12301) 2024-12-08T11:20:14,332 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/58fa07a08cf346339fb76a7a35f1664a 2024-12-08T11:20:14,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/e8284af994874039956e55c871c5c496 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e8284af994874039956e55c871c5c496 2024-12-08T11:20:14,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e8284af994874039956e55c871c5c496, entries=150, sequenceid=372, filesize=12.0 K 2024-12-08T11:20:14,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/b8a2a45b9f084398b186dd30294704e2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b8a2a45b9f084398b186dd30294704e2 2024-12-08T11:20:14,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b8a2a45b9f084398b186dd30294704e2, entries=150, sequenceid=372, filesize=12.0 K 2024-12-08T11:20:14,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/58fa07a08cf346339fb76a7a35f1664a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/58fa07a08cf346339fb76a7a35f1664a 2024-12-08T11:20:14,362 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/58fa07a08cf346339fb76a7a35f1664a, entries=150, sequenceid=372, filesize=12.0 K 2024-12-08T11:20:14,362 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:14,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:14,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 64a616eb95ce0fb49283d502a9d694a3 in 217ms, sequenceid=372, compaction requested=false 2024-12-08T11:20:14,364 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:14,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-08T11:20:14,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:14,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:14,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:14,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:14,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:14,492 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:14,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:14,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/bcddc008914e42f5824c19d2b2ec297c is 50, key is test_row_0/A:col10/1733656814489/Put/seqid=0 2024-12-08T11:20:14,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T11:20:14,519 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,520 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656874513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656874514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:14,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656874516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:14,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:14,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,522 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656874517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,523 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656874519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:14,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741917_1093 (size=12301) 2024-12-08T11:20:14,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656874621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656874622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656874624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,629 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656874629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656874632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,681 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:14,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:14,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,682 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
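Each rejected Mutate above reaches the client as a RegionTooBusyException, which is treated as retryable: the same put is expected to succeed once the in-flight flush frees memstore space, and the connections in these entries can be seen resubmitting their mutations. The HBase client performs this retrying itself, but the shape of it is roughly the hand-rolled sketch below (the attempt count and backoff values are illustrative, not what the test or the client actually uses).

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public final class BusyRegionRetry {
        // Retry a put with simple exponential backoff while the region reports it is too busy.
        static void putWithBackoff(Table table, Put put, int maxAttempts)
                throws IOException, InterruptedException {
            long delayMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    return;                                   // accepted once the memstore has drained
                } catch (RegionTooBusyException busy) {
                    if (attempt >= maxAttempts) {
                        throw busy;                           // give up and surface the last rejection
                    }
                    Thread.sleep(delayMs);
                    delayMs = Math.min(delayMs * 2, 5_000);   // cap the backoff
                }
            }
        }
    }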
2024-12-08T11:20:14,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T11:20:14,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656874825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656874826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656874827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656874831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,835 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:14,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656874835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,836 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:14,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:14,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,836 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
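The pid=27 flush procedure keeps failing with "Unable to complete flush" for a mundane reason visible just above: HRegion reports "NOT flushing ... as already flushing", so the remote callable gives up immediately instead of queueing a second flush, and the master re-dispatches the procedure until it can actually run. A simplified, hypothetical sketch of that fail-fast-and-retry shape (none of these names are HBase classes):

    import java.io.IOException;
    import java.util.concurrent.atomic.AtomicBoolean;

    public final class FlushRetrySketch {
        // Stands in for the region's "already flushing" state seen at HRegion(2496); illustrative only.
        private final AtomicBoolean flushing = new AtomicBoolean(false);

        // Region-server side: fail fast if a flush is already running, so the caller retries later.
        void flushOnce(String regionName) throws IOException {
            if (!flushing.compareAndSet(false, true)) {
                throw new IOException("Unable to complete flush " + regionName + " (already flushing)");
            }
            try {
                // ... write the memstore snapshot out to new store files ...
            } finally {
                flushing.set(false);
            }
        }

        // Master side, very roughly: keep re-dispatching until the flush completes.
        void driveFlush(String regionName, int maxAttempts) throws IOException, InterruptedException {
            for (int attempt = 1; attempt <= maxAttempts; attempt++) {
                try {
                    flushOnce(regionName);
                    return;
                } catch (IOException retryLater) {
                    Thread.sleep(150);   // the real procedure framework applies its own retry policy
                }
            }
            throw new IOException("flush did not complete after " + maxAttempts + " attempts: " + regionName);
        }
    }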
2024-12-08T11:20:14,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/bcddc008914e42f5824c19d2b2ec297c 2024-12-08T11:20:14,967 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/adaaaca5e2a34b5a8dd1b6a539924052 is 50, key is test_row_0/B:col10/1733656814489/Put/seqid=0 2024-12-08T11:20:14,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741918_1094 (size=12301) 2024-12-08T11:20:14,989 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:14,990 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:14,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:14,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:14,990 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
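The HFileWriterImpl line above prints the flushed file's largest cell key in HBase's usual display form, row/family:qualifier/timestamp/type/seqid, here test_row_0/B:col10/1733656814489/Put/seqid=0, with the timestamp in epoch milliseconds. When correlating these logs it can help to split that string back into its parts; a small self-contained sketch follows (the naive split assumes the row key itself contains no '/', which holds for this test's test_row_N keys):

    import java.time.Instant;

    public final class CellKeyFormat {
        public static void main(String[] args) {
            // Key copied verbatim from the flush log line above.
            String key = "test_row_0/B:col10/1733656814489/Put/seqid=0";
            String[] parts = key.split("/");                 // [row, family:qualifier, ts, type, seqid]
            String[] famQual = parts[1].split(":", 2);
            long tsMillis = Long.parseLong(parts[2]);

            System.out.printf("row=%s family=%s qualifier=%s ts=%s type=%s %s%n",
                parts[0], famQual[0], famQual[1], Instant.ofEpochMilli(tsMillis), parts[3], parts[4]);
            // -> row=test_row_0 family=B qualifier=col10 ts=2024-12-08T11:20:14.489Z type=Put seqid=0
        }
    }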
2024-12-08T11:20:14,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:14,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,127 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656875127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656875130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656875131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656875137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656875140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,143 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,143 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:15,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:15,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,144 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
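The ipc.CallRunner entries print each rejected call's deadline as epoch milliseconds; comparing that with the entry's own timestamp shows the callers still had roughly a minute of RPC budget left, which is consistent with the same mutations being resubmitted throughout this stretch of the log. A tiny sketch of that conversion, using callId 174 from the 11:20:15,142 entry above and assuming the log timestamps are UTC (consistent with the epoch-millis values elsewhere in this log):

    import java.time.Duration;
    import java.time.Instant;

    public final class RpcDeadlineCheck {
        public static void main(String[] args) {
            Instant logTime  = Instant.parse("2024-12-08T11:20:15.142Z"); // timestamp of the CallRunner entry above
            Instant deadline = Instant.ofEpochMilli(1733656875140L);      // "deadline: 1733656875140" for callId 174

            System.out.println("deadline  = " + deadline);                            // 2024-12-08T11:21:15.140Z
            System.out.println("remaining = " + Duration.between(logTime, deadline)); // PT59.998S, about a minute
        }
    }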
2024-12-08T11:20:15,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,297 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:15,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:15,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,300 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:15,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T11:20:15,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/adaaaca5e2a34b5a8dd1b6a539924052 2024-12-08T11:20:15,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/5f93055419f046138e6f498d526bfff6 is 50, key is test_row_0/C:col10/1733656814489/Put/seqid=0 2024-12-08T11:20:15,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741919_1095 (size=12301) 2024-12-08T11:20:15,453 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,454 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:15,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:15,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:15,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,606 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:15,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:15,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,607 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656875629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656875635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656875636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656875645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:15,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656875648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,761 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:15,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:15,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:15,806 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=393 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/5f93055419f046138e6f498d526bfff6 2024-12-08T11:20:15,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/bcddc008914e42f5824c19d2b2ec297c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bcddc008914e42f5824c19d2b2ec297c 2024-12-08T11:20:15,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bcddc008914e42f5824c19d2b2ec297c, entries=150, sequenceid=393, filesize=12.0 K 2024-12-08T11:20:15,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/adaaaca5e2a34b5a8dd1b6a539924052 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/adaaaca5e2a34b5a8dd1b6a539924052 2024-12-08T11:20:15,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/adaaaca5e2a34b5a8dd1b6a539924052, entries=150, 
sequenceid=393, filesize=12.0 K 2024-12-08T11:20:15,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/5f93055419f046138e6f498d526bfff6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/5f93055419f046138e6f498d526bfff6 2024-12-08T11:20:15,847 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/5f93055419f046138e6f498d526bfff6, entries=150, sequenceid=393, filesize=12.0 K 2024-12-08T11:20:15,848 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 64a616eb95ce0fb49283d502a9d694a3 in 1357ms, sequenceid=393, compaction requested=true 2024-12-08T11:20:15,848 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:15,849 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:15,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:15,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:15,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:15,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:15,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:15,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:15,849 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:15,850 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:15,850 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:15,851 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in 
TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,851 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:15,851 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/8bf91cb5c3704eaf83223953cb0e398d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e8284af994874039956e55c871c5c496, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bcddc008914e42f5824c19d2b2ec297c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=36.8 K 2024-12-08T11:20:15,851 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:15,851 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,851 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/70ed2c5e061c469e9427ff23720f2c25, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b8a2a45b9f084398b186dd30294704e2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/adaaaca5e2a34b5a8dd1b6a539924052] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=36.8 K 2024-12-08T11:20:15,852 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8bf91cb5c3704eaf83223953cb0e398d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733656812373 2024-12-08T11:20:15,852 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 70ed2c5e061c469e9427ff23720f2c25, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733656812373 2024-12-08T11:20:15,852 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8284af994874039956e55c871c5c496, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733656813527 2024-12-08T11:20:15,853 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b8a2a45b9f084398b186dd30294704e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733656813527 2024-12-08T11:20:15,853 
DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcddc008914e42f5824c19d2b2ec297c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733656814169 2024-12-08T11:20:15,854 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting adaaaca5e2a34b5a8dd1b6a539924052, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733656814169 2024-12-08T11:20:15,875 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#81 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:15,876 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/0445a237159046b1b22afa02c398e09a is 50, key is test_row_0/B:col10/1733656814489/Put/seqid=0 2024-12-08T11:20:15,880 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:15,881 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/f6e94a4f3a974b2493ff06af113343d6 is 50, key is test_row_0/A:col10/1733656814489/Put/seqid=0 2024-12-08T11:20:15,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741920_1096 (size=13221) 2024-12-08T11:20:15,911 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/0445a237159046b1b22afa02c398e09a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0445a237159046b1b22afa02c398e09a 2024-12-08T11:20:15,914 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:15,914 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-12-08T11:20:15,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:15,915 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-08T11:20:15,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:15,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:15,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:15,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:15,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:15,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:15,920 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into 0445a237159046b1b22afa02c398e09a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:15,920 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:15,920 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=13, startTime=1733656815849; duration=0sec 2024-12-08T11:20:15,920 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:15,920 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:15,924 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:15,925 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:15,925 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:15,925 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:15,926 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ec0e5559cdbb40aa96fabeff1765d925, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/58fa07a08cf346339fb76a7a35f1664a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/5f93055419f046138e6f498d526bfff6] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=36.8 K 2024-12-08T11:20:15,926 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ec0e5559cdbb40aa96fabeff1765d925, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1733656812373 2024-12-08T11:20:15,927 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 58fa07a08cf346339fb76a7a35f1664a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733656813527 2024-12-08T11:20:15,927 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f93055419f046138e6f498d526bfff6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733656814169 2024-12-08T11:20:15,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/933fdf2849a349158e16a7206401bda8 is 50, key is test_row_0/A:col10/1733656814504/Put/seqid=0 2024-12-08T11:20:15,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741921_1097 (size=13221) 2024-12-08T11:20:15,941 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#84 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:15,941 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/c294cb9563d04b858d3f3df342f2ecfb is 50, key is test_row_0/C:col10/1733656814489/Put/seqid=0 2024-12-08T11:20:15,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741922_1098 (size=13221) 2024-12-08T11:20:15,973 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/c294cb9563d04b858d3f3df342f2ecfb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c294cb9563d04b858d3f3df342f2ecfb 2024-12-08T11:20:15,982 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into c294cb9563d04b858d3f3df342f2ecfb(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:15,982 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:15,982 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656815849; duration=0sec 2024-12-08T11:20:15,983 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:15,983 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:15,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741923_1099 (size=12301) 2024-12-08T11:20:15,993 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/933fdf2849a349158e16a7206401bda8 2024-12-08T11:20:16,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/84fb3a79993844cc87196896be70b664 is 50, key is test_row_0/B:col10/1733656814504/Put/seqid=0 2024-12-08T11:20:16,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741924_1100 (size=12301) 2024-12-08T11:20:16,036 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/84fb3a79993844cc87196896be70b664 2024-12-08T11:20:16,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/90d4d8485724487b8d7f0417dea92ce4 is 50, key is test_row_0/C:col10/1733656814504/Put/seqid=0 2024-12-08T11:20:16,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741925_1101 (size=12301) 2024-12-08T11:20:16,063 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/90d4d8485724487b8d7f0417dea92ce4 
2024-12-08T11:20:16,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/933fdf2849a349158e16a7206401bda8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/933fdf2849a349158e16a7206401bda8 2024-12-08T11:20:16,082 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/933fdf2849a349158e16a7206401bda8, entries=150, sequenceid=411, filesize=12.0 K 2024-12-08T11:20:16,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/84fb3a79993844cc87196896be70b664 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/84fb3a79993844cc87196896be70b664 2024-12-08T11:20:16,091 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/84fb3a79993844cc87196896be70b664, entries=150, sequenceid=411, filesize=12.0 K 2024-12-08T11:20:16,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/90d4d8485724487b8d7f0417dea92ce4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/90d4d8485724487b8d7f0417dea92ce4 2024-12-08T11:20:16,100 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/90d4d8485724487b8d7f0417dea92ce4, entries=150, sequenceid=411, filesize=12.0 K 2024-12-08T11:20:16,103 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=0 B/0 for 64a616eb95ce0fb49283d502a9d694a3 in 188ms, sequenceid=411, compaction requested=false 2024-12-08T11:20:16,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:16,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:16,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-12-08T11:20:16,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-12-08T11:20:16,107 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-08T11:20:16,107 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8950 sec 2024-12-08T11:20:16,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.9020 sec 2024-12-08T11:20:16,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-08T11:20:16,314 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-08T11:20:16,316 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:16,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-12-08T11:20:16,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-08T11:20:16,322 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:16,323 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:16,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:16,355 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/f6e94a4f3a974b2493ff06af113343d6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/f6e94a4f3a974b2493ff06af113343d6 2024-12-08T11:20:16,366 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into f6e94a4f3a974b2493ff06af113343d6(size=12.9 K), total size for store is 24.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
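The FLUSH operations above (procId 26, then 28) are requested by the test client against table default:TestAcidGuarantees. A minimal client-side sketch of issuing the same kind of synchronous flush through the public HBase Admin API follows; the connection setup and class name are assumptions, not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Synchronous flush of every region of the table. On the master this becomes a
      // FlushTableProcedure with one FlushRegionProcedure child per region, which is
      // the pid/ppid pairing visible in the procedure entries above.
      admin.flush(TableName.valueOf("default", "TestAcidGuarantees"));
    }
  }
}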
2024-12-08T11:20:16,366 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:16,366 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=13, startTime=1733656815849; duration=0sec 2024-12-08T11:20:16,366 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:16,366 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:16,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-08T11:20:16,476 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-12-08T11:20:16,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:16,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:16,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
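The compaction completions logged here are scheduled automatically by the region server's short/long compaction pools after the flushes; for reference, a client can also request a compaction of a single store explicitly. A hedged sketch using the Admin API (table and family names come from the log, everything else is assumed):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionSketch {
  // Ask the region server to compact store "A" of the test table. The request is
  // asynchronous; the server queues it on the same compaction pools seen above.
  static void compactStoreA(Admin admin) throws java.io.IOException {
    admin.compact(TableName.valueOf("default", "TestAcidGuarantees"), Bytes.toBytes("A"));
  }
}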
2024-12-08T11:20:16,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-08T11:20:16,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-12-08T11:20:16,481 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-08T11:20:16,482 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 156 msec 2024-12-08T11:20:16,485 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 167 msec 2024-12-08T11:20:16,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-12-08T11:20:16,623 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-12-08T11:20:16,625 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:16,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-12-08T11:20:16,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T11:20:16,627 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:16,627 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:16,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:16,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:16,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:20:16,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:16,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
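The CompactingMemStore / "Swapping pipeline suffix" entries indicate that families A, B and C run with in-memory compaction enabled. A sketch of how a table with such families could be declared is shown below; the policy choice and family options are assumptions, since the actual test setup is not part of this log.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  static void create(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("default", "TestAcidGuarantees"));
    for (String family : new String[] {"A", "B", "C"}) {
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              // BASIC gives a CompactingMemStore with the flush pipeline seen in the log;
              // the concrete policy used by the test is an assumption here.
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
              .build());
    }
    admin.createTable(table.build());
  }
}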
2024-12-08T11:20:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:16,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:16,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/a1a65910e3cf4eb59148a5bbd24b7574 is 50, key is test_row_0/A:col10/1733656816650/Put/seqid=0 2024-12-08T11:20:16,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741926_1102 (size=12301) 2024-12-08T11:20:16,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656876682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,686 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656876683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656876684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656876685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656876686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T11:20:16,779 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-08T11:20:16,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:16,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:16,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:16,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
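The repeated RegionTooBusyException entries report a blocking limit of 512.0 K: the per-region memstore size at which writes are rejected until a flush catches up. That threshold is the configured flush size multiplied by the block multiplier, and the small value strongly suggests the test lowers the flush size on purpose. The numbers below are assumptions chosen only to reproduce a 512 KB limit, not values read from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Blocking limit = flush.size * block.multiplier; 128 KB * 4 = 512 KB,
    // matching the "Over memstore limit=512.0 K" messages. Values are illustrative.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}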
2024-12-08T11:20:16,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:16,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:16,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656876787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656876788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656876790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656876797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:16,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656876799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T11:20:16,933 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:16,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-08T11:20:16,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:16,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:16,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:16,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
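From the client's point of view, every Mutate rejected above surfaces as a RegionTooBusyException. The standard HBase client already retries these automatically with backoff; the pattern a hand-rolled caller would use looks roughly like the sketch below, where the table, put, and retry parameters are assumptions:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetrySketch {
  static void putWithRetry(Table table, Put put, int maxAttempts, long backoffMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;                           // give up once the retry budget is spent
        }
        Thread.sleep(backoffMs * attempt);   // linear backoff while the region flushes
      }
    }
  }
}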
2024-12-08T11:20:16,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:16,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:16,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656876997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656876997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656876998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656877000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656877006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/a1a65910e3cf4eb59148a5bbd24b7574 2024-12-08T11:20:17,090 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/af9d2367fd0045c190886c61ec66ab25 is 50, key is test_row_0/B:col10/1733656816650/Put/seqid=0 2024-12-08T11:20:17,100 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,101 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-08T11:20:17,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:17,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,101 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741927_1103 (size=12301) 2024-12-08T11:20:17,136 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/af9d2367fd0045c190886c61ec66ab25 2024-12-08T11:20:17,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/f2323fa6d74f4965974cb84ced38e64c is 50, key is test_row_0/C:col10/1733656816650/Put/seqid=0 2024-12-08T11:20:17,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741928_1104 (size=12301) 2024-12-08T11:20:17,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T11:20:17,254 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-08T11:20:17,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:17,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:17,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:17,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,302 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656877302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656877305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656877305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656877306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,315 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656877314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,410 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-08T11:20:17,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:17,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,411 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,568 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-08T11:20:17,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:17,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] handler.RSProcedureHandler(58): pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=31 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=31 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:17,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=426 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/f2323fa6d74f4965974cb84ced38e64c 2024-12-08T11:20:17,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/a1a65910e3cf4eb59148a5bbd24b7574 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a1a65910e3cf4eb59148a5bbd24b7574 2024-12-08T11:20:17,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a1a65910e3cf4eb59148a5bbd24b7574, entries=150, sequenceid=426, filesize=12.0 K 2024-12-08T11:20:17,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/af9d2367fd0045c190886c61ec66ab25 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/af9d2367fd0045c190886c61ec66ab25 2024-12-08T11:20:17,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/af9d2367fd0045c190886c61ec66ab25, entries=150, 
sequenceid=426, filesize=12.0 K 2024-12-08T11:20:17,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/f2323fa6d74f4965974cb84ced38e64c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f2323fa6d74f4965974cb84ced38e64c 2024-12-08T11:20:17,616 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f2323fa6d74f4965974cb84ced38e64c, entries=150, sequenceid=426, filesize=12.0 K 2024-12-08T11:20:17,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 64a616eb95ce0fb49283d502a9d694a3 in 966ms, sequenceid=426, compaction requested=true 2024-12-08T11:20:17,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:17,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:17,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:17,618 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:17,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:17,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:17,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:17,618 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:17,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:17,619 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:17,619 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:17,619 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in 
TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,620 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/f6e94a4f3a974b2493ff06af113343d6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/933fdf2849a349158e16a7206401bda8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a1a65910e3cf4eb59148a5bbd24b7574] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=36.9 K 2024-12-08T11:20:17,620 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:17,620 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:17,621 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,621 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0445a237159046b1b22afa02c398e09a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/84fb3a79993844cc87196896be70b664, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/af9d2367fd0045c190886c61ec66ab25] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=36.9 K 2024-12-08T11:20:17,621 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 0445a237159046b1b22afa02c398e09a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733656814169 2024-12-08T11:20:17,622 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6e94a4f3a974b2493ff06af113343d6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733656814169 2024-12-08T11:20:17,622 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 84fb3a79993844cc87196896be70b664, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733656814504 2024-12-08T11:20:17,623 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 933fdf2849a349158e16a7206401bda8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733656814504 2024-12-08T11:20:17,623 
DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting af9d2367fd0045c190886c61ec66ab25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733656816634 2024-12-08T11:20:17,623 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1a65910e3cf4eb59148a5bbd24b7574, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733656816634 2024-12-08T11:20:17,638 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:17,639 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/46379948e34349b3aeead14519396bcc is 50, key is test_row_0/B:col10/1733656816650/Put/seqid=0 2024-12-08T11:20:17,650 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:17,651 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/0d848023e47d4357bdc5ad41d56f5f8a is 50, key is test_row_0/A:col10/1733656816650/Put/seqid=0 2024-12-08T11:20:17,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741929_1105 (size=13323) 2024-12-08T11:20:17,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741930_1106 (size=13323) 2024-12-08T11:20:17,661 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/0d848023e47d4357bdc5ad41d56f5f8a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/0d848023e47d4357bdc5ad41d56f5f8a 2024-12-08T11:20:17,669 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into 0d848023e47d4357bdc5ad41d56f5f8a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:17,669 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:17,669 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=13, startTime=1733656817617; duration=0sec 2024-12-08T11:20:17,669 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:17,669 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:17,669 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:17,671 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:17,671 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:17,671 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:17,671 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c294cb9563d04b858d3f3df342f2ecfb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/90d4d8485724487b8d7f0417dea92ce4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f2323fa6d74f4965974cb84ced38e64c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=36.9 K 2024-12-08T11:20:17,672 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting c294cb9563d04b858d3f3df342f2ecfb, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=393, earliestPutTs=1733656814169 2024-12-08T11:20:17,672 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90d4d8485724487b8d7f0417dea92ce4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1733656814504 2024-12-08T11:20:17,673 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2323fa6d74f4965974cb84ced38e64c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733656816634 2024-12-08T11:20:17,683 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#92 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:17,684 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/a3c71ccf51794b938d2e1ff12f4f3c70 is 50, key is test_row_0/C:col10/1733656816650/Put/seqid=0 2024-12-08T11:20:17,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741931_1107 (size=13323) 2024-12-08T11:20:17,707 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/a3c71ccf51794b938d2e1ff12f4f3c70 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a3c71ccf51794b938d2e1ff12f4f3c70 2024-12-08T11:20:17,716 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into a3c71ccf51794b938d2e1ff12f4f3c70(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:17,717 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:17,717 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656817618; duration=0sec 2024-12-08T11:20:17,717 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:17,717 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:17,721 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-12-08T11:20:17,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:17,722 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:20:17,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:17,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:17,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:17,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:17,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:17,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:17,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T11:20:17,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/fc7d5d7dcd60462da6ad8deb8fc84bf9 is 50, key is test_row_0/A:col10/1733656816683/Put/seqid=0 2024-12-08T11:20:17,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741932_1108 (size=12301) 2024-12-08T11:20:17,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:17,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:17,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656877817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,819 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656877817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656877819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656877819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656877819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656877921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656877921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656877921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:17,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:17,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656877938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,060 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/46379948e34349b3aeead14519396bcc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/46379948e34349b3aeead14519396bcc 2024-12-08T11:20:18,067 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into 46379948e34349b3aeead14519396bcc(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:18,067 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:18,067 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=13, startTime=1733656817618; duration=0sec 2024-12-08T11:20:18,068 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:18,068 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:18,124 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656878124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656878124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656878126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656878140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,143 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/fc7d5d7dcd60462da6ad8deb8fc84bf9 2024-12-08T11:20:18,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/7d2ead286b5048c8b5b4747b3bb2adc6 is 50, key is test_row_0/B:col10/1733656816683/Put/seqid=0 2024-12-08T11:20:18,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741933_1109 (size=12301) 2024-12-08T11:20:18,191 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/7d2ead286b5048c8b5b4747b3bb2adc6 2024-12-08T11:20:18,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/1ab145cf7b7840e69b2aea0539f2f7ad is 50, key is test_row_0/C:col10/1733656816683/Put/seqid=0 2024-12-08T11:20:18,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741934_1110 (size=12301) 2024-12-08T11:20:18,253 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/1ab145cf7b7840e69b2aea0539f2f7ad 2024-12-08T11:20:18,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/fc7d5d7dcd60462da6ad8deb8fc84bf9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc7d5d7dcd60462da6ad8deb8fc84bf9 2024-12-08T11:20:18,271 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc7d5d7dcd60462da6ad8deb8fc84bf9, entries=150, sequenceid=453, filesize=12.0 K 2024-12-08T11:20:18,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/7d2ead286b5048c8b5b4747b3bb2adc6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7d2ead286b5048c8b5b4747b3bb2adc6 2024-12-08T11:20:18,280 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7d2ead286b5048c8b5b4747b3bb2adc6, entries=150, sequenceid=453, filesize=12.0 K 2024-12-08T11:20:18,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/1ab145cf7b7840e69b2aea0539f2f7ad as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1ab145cf7b7840e69b2aea0539f2f7ad 2024-12-08T11:20:18,289 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1ab145cf7b7840e69b2aea0539f2f7ad, entries=150, sequenceid=453, filesize=12.0 K 2024-12-08T11:20:18,292 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 64a616eb95ce0fb49283d502a9d694a3 in 570ms, sequenceid=453, compaction requested=false 2024-12-08T11:20:18,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:18,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:18,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-12-08T11:20:18,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-12-08T11:20:18,296 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-12-08T11:20:18,296 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6660 sec 2024-12-08T11:20:18,298 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 1.6720 sec 2024-12-08T11:20:18,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:18,427 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:20:18,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:18,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:18,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:18,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:18,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:18,428 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:18,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/ac256a2c165341068ba89c758142f2b5 is 50, key is test_row_0/A:col10/1733656817817/Put/seqid=0 2024-12-08T11:20:18,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741935_1111 (size=12301) 2024-12-08T11:20:18,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/ac256a2c165341068ba89c758142f2b5 2024-12-08T11:20:18,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656878451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656878452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,455 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/7adbf2671a6c47389a7129985326fef7 is 50, key is test_row_0/B:col10/1733656817817/Put/seqid=0 2024-12-08T11:20:18,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656878452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,479 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656878453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741936_1112 (size=12301) 2024-12-08T11:20:18,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/7adbf2671a6c47389a7129985326fef7 2024-12-08T11:20:18,505 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/2659fd4862d743088571e42d9a9040e8 is 50, key is test_row_0/C:col10/1733656817817/Put/seqid=0 2024-12-08T11:20:18,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741937_1113 (size=12301) 2024-12-08T11:20:18,516 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=466 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/2659fd4862d743088571e42d9a9040e8 2024-12-08T11:20:18,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/ac256a2c165341068ba89c758142f2b5 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/ac256a2c165341068ba89c758142f2b5 2024-12-08T11:20:18,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/ac256a2c165341068ba89c758142f2b5, entries=150, sequenceid=466, filesize=12.0 K 2024-12-08T11:20:18,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/7adbf2671a6c47389a7129985326fef7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7adbf2671a6c47389a7129985326fef7 2024-12-08T11:20:18,541 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7adbf2671a6c47389a7129985326fef7, entries=150, sequenceid=466, filesize=12.0 K 2024-12-08T11:20:18,543 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/2659fd4862d743088571e42d9a9040e8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2659fd4862d743088571e42d9a9040e8 2024-12-08T11:20:18,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2659fd4862d743088571e42d9a9040e8, entries=150, sequenceid=466, filesize=12.0 K 2024-12-08T11:20:18,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 64a616eb95ce0fb49283d502a9d694a3 in 123ms, sequenceid=466, compaction requested=true 2024-12-08T11:20:18,551 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:18,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:18,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:18,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:18,551 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:18,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:18,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:18,551 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:18,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:18,553 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:18,553 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:18,553 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:18,553 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:18,553 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:18,553 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:18,553 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/0d848023e47d4357bdc5ad41d56f5f8a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc7d5d7dcd60462da6ad8deb8fc84bf9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/ac256a2c165341068ba89c758142f2b5] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=37.0 K 2024-12-08T11:20:18,553 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/46379948e34349b3aeead14519396bcc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7d2ead286b5048c8b5b4747b3bb2adc6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7adbf2671a6c47389a7129985326fef7] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=37.0 K 2024-12-08T11:20:18,554 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d848023e47d4357bdc5ad41d56f5f8a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733656816634 2024-12-08T11:20:18,554 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 46379948e34349b3aeead14519396bcc, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733656816634 2024-12-08T11:20:18,554 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc7d5d7dcd60462da6ad8deb8fc84bf9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1733656816678 2024-12-08T11:20:18,555 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d2ead286b5048c8b5b4747b3bb2adc6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1733656816678 2024-12-08T11:20:18,555 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7adbf2671a6c47389a7129985326fef7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=466, earliestPutTs=1733656817815 2024-12-08T11:20:18,555 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac256a2c165341068ba89c758142f2b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=466, earliestPutTs=1733656817815 2024-12-08T11:20:18,566 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#99 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:18,567 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/4b2fcfc4a9144caa875525f11af72aa6 is 50, key is test_row_0/A:col10/1733656817817/Put/seqid=0 2024-12-08T11:20:18,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:18,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:20:18,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:18,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:18,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:18,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:18,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:18,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:18,581 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#100 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:18,581 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/2c18a5523c5d469e880b4e5e46c1727f is 50, key is test_row_0/B:col10/1733656817817/Put/seqid=0 2024-12-08T11:20:18,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656878586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656878588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,591 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656878588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656878589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,594 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/55cb164cf3064ff6ba86c65308ee7e6b is 50, key is test_row_0/A:col10/1733656818452/Put/seqid=0 2024-12-08T11:20:18,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741938_1114 (size=13425) 2024-12-08T11:20:18,628 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/4b2fcfc4a9144caa875525f11af72aa6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4b2fcfc4a9144caa875525f11af72aa6 2024-12-08T11:20:18,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741939_1115 (size=13425) 2024-12-08T11:20:18,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741940_1116 (size=12301) 2024-12-08T11:20:18,638 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into 4b2fcfc4a9144caa875525f11af72aa6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:18,638 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:18,638 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=13, startTime=1733656818551; duration=0sec 2024-12-08T11:20:18,638 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:18,638 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:18,638 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:18,640 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:18,640 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:18,640 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:18,640 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a3c71ccf51794b938d2e1ff12f4f3c70, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1ab145cf7b7840e69b2aea0539f2f7ad, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2659fd4862d743088571e42d9a9040e8] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=37.0 K 2024-12-08T11:20:18,641 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3c71ccf51794b938d2e1ff12f4f3c70, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=426, earliestPutTs=1733656816634 2024-12-08T11:20:18,641 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ab145cf7b7840e69b2aea0539f2f7ad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1733656816678 2024-12-08T11:20:18,642 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2659fd4862d743088571e42d9a9040e8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=466, earliestPutTs=1733656817815 2024-12-08T11:20:18,657 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#102 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:18,659 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/0e2c90c4aa2b444db09e7d0b5c07ab06 is 50, key is test_row_0/C:col10/1733656817817/Put/seqid=0 2024-12-08T11:20:18,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741941_1117 (size=13425) 2024-12-08T11:20:18,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656878691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656878692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656878692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656878695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-08T11:20:18,732 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-08T11:20:18,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:18,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-12-08T11:20:18,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-08T11:20:18,736 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:18,737 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:18,738 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:18,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656878826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-08T11:20:18,892 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-08T11:20:18,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:18,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:18,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:18,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:18,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:18,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:18,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656878897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656878898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656878898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:18,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:18,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656878899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/55cb164cf3064ff6ba86c65308ee7e6b 2024-12-08T11:20:19,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-08T11:20:19,045 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/2c18a5523c5d469e880b4e5e46c1727f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/2c18a5523c5d469e880b4e5e46c1727f 2024-12-08T11:20:19,047 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-08T11:20:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:19,048 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/e29bd90b62604c2783fbcdc287241754 is 50, key is test_row_0/B:col10/1733656818452/Put/seqid=0 2024-12-08T11:20:19,055 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into 2c18a5523c5d469e880b4e5e46c1727f(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:19,055 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:19,055 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=13, startTime=1733656818551; duration=0sec 2024-12-08T11:20:19,055 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:19,055 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:19,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741942_1118 (size=12301) 2024-12-08T11:20:19,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/e29bd90b62604c2783fbcdc287241754 2024-12-08T11:20:19,079 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/0e2c90c4aa2b444db09e7d0b5c07ab06 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/0e2c90c4aa2b444db09e7d0b5c07ab06 2024-12-08T11:20:19,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/10ef86b590e44db285ce07c5774c0905 is 50, key is test_row_0/C:col10/1733656818452/Put/seqid=0 2024-12-08T11:20:19,085 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into 0e2c90c4aa2b444db09e7d0b5c07ab06(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:19,085 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:19,085 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656818551; duration=0sec 2024-12-08T11:20:19,085 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:19,085 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:19,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741943_1119 (size=12301) 2024-12-08T11:20:19,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=491 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/10ef86b590e44db285ce07c5774c0905 2024-12-08T11:20:19,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/55cb164cf3064ff6ba86c65308ee7e6b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/55cb164cf3064ff6ba86c65308ee7e6b 2024-12-08T11:20:19,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/55cb164cf3064ff6ba86c65308ee7e6b, entries=150, sequenceid=491, filesize=12.0 K 2024-12-08T11:20:19,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/e29bd90b62604c2783fbcdc287241754 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/e29bd90b62604c2783fbcdc287241754 2024-12-08T11:20:19,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/e29bd90b62604c2783fbcdc287241754, entries=150, sequenceid=491, filesize=12.0 K 2024-12-08T11:20:19,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/10ef86b590e44db285ce07c5774c0905 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/10ef86b590e44db285ce07c5774c0905 2024-12-08T11:20:19,128 INFO 
[MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/10ef86b590e44db285ce07c5774c0905, entries=150, sequenceid=491, filesize=12.0 K 2024-12-08T11:20:19,129 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 64a616eb95ce0fb49283d502a9d694a3 in 560ms, sequenceid=491, compaction requested=false 2024-12-08T11:20:19,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:19,200 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:19,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:20:19,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:19,201 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-08T11:20:19,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:19,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:19,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:19,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:19,202 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:19,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:19,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,202 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/fc9c255a14fa44ce8aab8965c6e42ad6 is 50, key is test_row_0/A:col10/1733656818587/Put/seqid=0 2024-12-08T11:20:19,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741944_1120 (size=14741) 2024-12-08T11:20:19,242 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656879239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656879241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656879242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656879242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-08T11:20:19,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656879344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656879346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656879348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656879351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,354 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-08T11:20:19,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:19,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-08T11:20:19,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:19,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656879550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656879550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656879550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656879553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=506 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/fc9c255a14fa44ce8aab8965c6e42ad6 2024-12-08T11:20:19,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/f450aa1af7e94077b550524608dc1784 is 50, key is test_row_0/B:col10/1733656818587/Put/seqid=0 2024-12-08T11:20:19,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741945_1121 (size=12301) 2024-12-08T11:20:19,662 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,662 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-08T11:20:19,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:19,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,663 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,816 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-08T11:20:19,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:19,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,817 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-08T11:20:19,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656879853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,855 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656879853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656879854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:19,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656879855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,969 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:19,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-08T11:20:19,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:19,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:19,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
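The RegionTooBusyException entries above all quote the same blocking threshold, "Over memstore limit=512.0 K": HRegion.checkResources rejects new mutations once a region's memstore grows past its flush size multiplied by the block multiplier. A minimal Java sketch of that arithmetic, assuming a 128 KB flush size for this test (not shown in the log) together with the default multiplier of 4, which reproduces the logged 512 K limit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test settings: flush each memstore at 128 KB and block
    // writes once it reaches four times that size (4 is the HBase default).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
    long blocking = flushSize * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Prints 524288 bytes, i.e. the "512.0 K" limit quoted by checkResources().
    System.out.println("blocking memstore limit = " + blocking + " bytes");
  }
}

Until the in-flight flush brings the memstore back under that limit, the RPC handlers keep answering Mutate calls with the exception seen in the surrounding entries.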
2024-12-08T11:20:19,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:19,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:20,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=506 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/f450aa1af7e94077b550524608dc1784 2024-12-08T11:20:20,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/2bdfb1e3029a49639346ba244f0f7a01 is 50, key is test_row_0/C:col10/1733656818587/Put/seqid=0 2024-12-08T11:20:20,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741946_1122 (size=12301) 2024-12-08T11:20:20,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=506 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/2bdfb1e3029a49639346ba244f0f7a01 2024-12-08T11:20:20,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/fc9c255a14fa44ce8aab8965c6e42ad6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc9c255a14fa44ce8aab8965c6e42ad6 2024-12-08T11:20:20,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc9c255a14fa44ce8aab8965c6e42ad6, entries=200, sequenceid=506, filesize=14.4 K 2024-12-08T11:20:20,098 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/f450aa1af7e94077b550524608dc1784 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f450aa1af7e94077b550524608dc1784 2024-12-08T11:20:20,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f450aa1af7e94077b550524608dc1784, entries=150, sequenceid=506, filesize=12.0 K 2024-12-08T11:20:20,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/2bdfb1e3029a49639346ba244f0f7a01 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2bdfb1e3029a49639346ba244f0f7a01 2024-12-08T11:20:20,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2bdfb1e3029a49639346ba244f0f7a01, entries=150, sequenceid=506, filesize=12.0 K 2024-12-08T11:20:20,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 64a616eb95ce0fb49283d502a9d694a3 in 917ms, sequenceid=506, compaction requested=true 2024-12-08T11:20:20,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:20,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:20,118 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:20,118 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:20,118 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:20,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:20,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:20,121 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:20,121 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:20,121 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
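The completed flush leaves each of the A, B, and C stores with three HFiles, and the policy lines around this point ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking") match HBase's default thresholds of 3 files to trigger a minor compaction and 16 files to block further writes. A brief sketch of those knobs, using default values rather than anything confirmed by this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // A minor compaction is considered once a store has this many files.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // At most this many files are rewritten in one minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes to the region are delayed once a store accumulates this many files.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    System.out.println("compact at " + conf.getInt("hbase.hstore.compaction.min", 3)
        + " files, block at " + conf.getInt("hbase.hstore.blockingStoreFiles", 16) + " files");
  }
}

With three small files per store, the ExploringCompactionPolicy entries here select all of them for a single minor compaction, consistent with the totalSize figures logged for stores A and B.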
2024-12-08T11:20:20,121 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4b2fcfc4a9144caa875525f11af72aa6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/55cb164cf3064ff6ba86c65308ee7e6b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc9c255a14fa44ce8aab8965c6e42ad6] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=39.5 K 2024-12-08T11:20:20,121 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:20,121 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b2fcfc4a9144caa875525f11af72aa6, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=466, earliestPutTs=1733656817815 2024-12-08T11:20:20,122 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:20,122 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
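The two compaction threads above each select three store files "after considering 1 permutations with 1 in ratio". As a rough illustration of that kind of ratio-based minor-compaction selection, the sketch below checks contiguous windows of candidate files and keeps a window only when no file is larger than the ratio times the sum of the others. This is a simplified model, not the actual org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy; the file sizes and ratio in main are made up to roughly match the 13 K / 12 K / 14 K files flushed above.

import java.util.ArrayList;
import java.util.List;

// Simplified sketch of ratio-based compaction selection (not the real policy).
public class RatioSelectionSketch {

    // A window is "in ratio" when no single file exceeds ratio * (sum of the others).
    static boolean inRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    // Scan contiguous windows (oldest file first) and keep the best one:
    // most files, smallest total size as a tie-breaker.
    static List<Long> select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestSize = Long.MAX_VALUE;
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(sizes.size(), start + maxFiles); end++) {
                List<Long> window = sizes.subList(start, end);
                if (!inRatio(window, ratio)) {
                    continue;
                }
                long windowSize = window.stream().mapToLong(Long::longValue).sum();
                if (window.size() > best.size()
                        || (window.size() == best.size() && windowSize < bestSize)) {
                    best = new ArrayList<>(window);
                    bestSize = windowSize;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Hypothetical HFile sizes in bytes, loosely matching the flushed files above.
        List<Long> sizes = List.of(13_469L, 12_301L, 14_741L);
        System.out.println("Selected for compaction: " + select(sizes, 3, 10, 1.2));
    }
}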
2024-12-08T11:20:20,122 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/2c18a5523c5d469e880b4e5e46c1727f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/e29bd90b62604c2783fbcdc287241754, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f450aa1af7e94077b550524608dc1784] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=37.1 K 2024-12-08T11:20:20,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:20,122 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,122 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:20,122 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55cb164cf3064ff6ba86c65308ee7e6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1733656818446 2024-12-08T11:20:20,122 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c18a5523c5d469e880b4e5e46c1727f, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=466, earliestPutTs=1733656817815 2024-12-08T11:20:20,122 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-12-08T11:20:20,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:20,123 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc9c255a14fa44ce8aab8965c6e42ad6, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=506, earliestPutTs=1733656818585 2024-12-08T11:20:20,123 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T11:20:20,123 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting e29bd90b62604c2783fbcdc287241754, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=491, earliestPutTs=1733656818446 2024-12-08T11:20:20,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:20,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:20,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:20,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:20,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:20,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:20,124 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f450aa1af7e94077b550524608dc1784, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=506, earliestPutTs=1733656818585 2024-12-08T11:20:20,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/b0b52ede850440819e57d380930380fd is 50, key is test_row_0/A:col10/1733656819241/Put/seqid=0 2024-12-08T11:20:20,140 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#109 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:20,141 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/c6cecefb6ecc4269963616ef94e43f2a is 50, key is test_row_0/A:col10/1733656818587/Put/seqid=0 2024-12-08T11:20:20,144 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#110 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:20,145 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/da126791633d49488be16f1c0a65f462 is 50, key is test_row_0/B:col10/1733656818587/Put/seqid=0 2024-12-08T11:20:20,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741947_1123 (size=12301) 2024-12-08T11:20:20,157 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/b0b52ede850440819e57d380930380fd 2024-12-08T11:20:20,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741948_1124 (size=13527) 2024-12-08T11:20:20,172 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/c6cecefb6ecc4269963616ef94e43f2a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/c6cecefb6ecc4269963616ef94e43f2a 2024-12-08T11:20:20,179 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into c6cecefb6ecc4269963616ef94e43f2a(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
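The PressureAwareThroughputController entries above report each compaction's average write rate and sleep time against the 50.00 MB/second limit. The toy sketch below shows the general idea of that kind of rate limiting: after each chunk of output, sleep long enough that the running average stays under the limit. It is only a simplified model, not the HBase controller; the chunk size and chunk count are invented.

// Toy write-rate limiter in the spirit of the throughput log entries above.
public class ThroughputThrottleSketch {
    public static void main(String[] args) throws InterruptedException {
        final double limitBytesPerSec = 50.0 * 1024 * 1024; // 50 MB/s limit from the log
        final long chunkBytes = 4 * 1024 * 1024;            // hypothetical 4 MB write chunks
        final int chunks = 10;

        long written = 0;
        long sleptMs = 0;
        long start = System.nanoTime();

        for (int i = 0; i < chunks; i++) {
            // ... write one chunk of compaction output here ...
            written += chunkBytes;

            double elapsedSec = (System.nanoTime() - start) / 1e9;
            double minSecForBytes = written / limitBytesPerSec;
            long sleepMs = (long) ((minSecForBytes - elapsedSec) * 1000);
            if (sleepMs > 0) {
                Thread.sleep(sleepMs);   // stay under the configured limit
                sleptMs += sleepMs;
            }
        }

        double totalSec = (System.nanoTime() - start) / 1e9;
        System.out.printf("average throughput is %.2f MB/second, total slept time is %d ms%n",
                written / totalSec / (1024 * 1024), sleptMs);
    }
}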
2024-12-08T11:20:20,179 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:20,179 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=13, startTime=1733656820118; duration=0sec 2024-12-08T11:20:20,179 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:20,179 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:20,179 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:20,181 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:20,181 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:20,183 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:20,183 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/0e2c90c4aa2b444db09e7d0b5c07ab06, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/10ef86b590e44db285ce07c5774c0905, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2bdfb1e3029a49639346ba244f0f7a01] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=37.1 K 2024-12-08T11:20:20,184 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e2c90c4aa2b444db09e7d0b5c07ab06, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=466, earliestPutTs=1733656817815 2024-12-08T11:20:20,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/0faddf87818a465eb4b31cfb3d48ce25 is 50, key is test_row_0/B:col10/1733656819241/Put/seqid=0 2024-12-08T11:20:20,188 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10ef86b590e44db285ce07c5774c0905, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=491, earliestPutTs=1733656818446 2024-12-08T11:20:20,189 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2bdfb1e3029a49639346ba244f0f7a01, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=506, earliestPutTs=1733656818585 2024-12-08T11:20:20,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741949_1125 (size=13527) 2024-12-08T11:20:20,200 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/da126791633d49488be16f1c0a65f462 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/da126791633d49488be16f1c0a65f462 2024-12-08T11:20:20,209 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into da126791633d49488be16f1c0a65f462(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:20,209 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:20,210 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=13, startTime=1733656820118; duration=0sec 2024-12-08T11:20:20,210 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:20,210 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:20,220 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#112 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:20,223 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/f95943e849034e339a145c6f0488ac58 is 50, key is test_row_0/C:col10/1733656818587/Put/seqid=0 2024-12-08T11:20:20,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741950_1126 (size=12301) 2024-12-08T11:20:20,237 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/0faddf87818a465eb4b31cfb3d48ce25 2024-12-08T11:20:20,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741951_1127 (size=13527) 2024-12-08T11:20:20,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/08cef308b7164a23ab95b37390bc0f36 is 50, key is test_row_0/C:col10/1733656819241/Put/seqid=0 2024-12-08T11:20:20,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741952_1128 (size=12301) 2024-12-08T11:20:20,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:20,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:20,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656880371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656880371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656880372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656880373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656880476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656880476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656880479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656880482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,669 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/f95943e849034e339a145c6f0488ac58 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f95943e849034e339a145c6f0488ac58 2024-12-08T11:20:20,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656880680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656880680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,683 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into f95943e849034e339a145c6f0488ac58(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
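The repeated RegionTooBusyException entries in this stretch of the log are back-pressure: checkResources rejects new puts while the region's memstore is over the 512 K limit used by this test, and callers are expected to back off and retry once the flush drains the memstore. The sketch below shows one way an application-level writer might handle that exception; the table name comes from the log, but the retry count, sleep times, and single-Put shape are illustrative, and the standard HBase client also performs its own internal retries before surfacing the error.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch of a writer that backs off when the server answers with
// RegionTooBusyException, as in the log entries above.
public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    return;                      // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore over limit on the server side: wait and retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
            throw new IllegalStateException("Region still busy after 5 attempts");
        }
    }
}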
2024-12-08T11:20:20,683 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:20,683 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656820120; duration=0sec 2024-12-08T11:20:20,683 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:20,683 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:20,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656880683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,686 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/08cef308b7164a23ab95b37390bc0f36 2024-12-08T11:20:20,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656880688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/b0b52ede850440819e57d380930380fd as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/b0b52ede850440819e57d380930380fd 2024-12-08T11:20:20,713 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/b0b52ede850440819e57d380930380fd, entries=150, sequenceid=530, filesize=12.0 K 2024-12-08T11:20:20,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/0faddf87818a465eb4b31cfb3d48ce25 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0faddf87818a465eb4b31cfb3d48ce25 2024-12-08T11:20:20,727 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0faddf87818a465eb4b31cfb3d48ce25, 
entries=150, sequenceid=530, filesize=12.0 K 2024-12-08T11:20:20,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/08cef308b7164a23ab95b37390bc0f36 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/08cef308b7164a23ab95b37390bc0f36 2024-12-08T11:20:20,742 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/08cef308b7164a23ab95b37390bc0f36, entries=150, sequenceid=530, filesize=12.0 K 2024-12-08T11:20:20,746 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 64a616eb95ce0fb49283d502a9d694a3 in 623ms, sequenceid=530, compaction requested=false 2024-12-08T11:20:20,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:20,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
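Each flush and compaction in this section first writes its output HFile under the region's .tmp directory and only then "commits" it into the store directory, as recorded by the HRegionFileSystem(442) "Committing ... as ..." entries followed by HStore "Added ..." entries. The sketch below shows that write-then-rename pattern against the Hadoop FileSystem API; the paths and file name are hypothetical, and this is not the HRegionFileSystem implementation itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "write into .tmp, then rename into place" commit step seen in
// the HRegionFileSystem log entries above. Paths are illustrative only.
public class TmpCommitSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/hfile-example");
        Path storeFile = new Path("/data/default/TestAcidGuarantees/region/A/hfile-example");

        // Write the new file under .tmp so readers never see a half-written file.
        try (FSDataOutputStream out = fs.create(tmpFile, /* overwrite = */ true)) {
            out.writeBytes("example cell data");
        }

        // Commit: an HDFS rename is atomic within one filesystem, so the store
        // directory only ever contains complete files.
        fs.mkdirs(storeFile.getParent());
        if (!fs.rename(tmpFile, storeFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
    }
}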
2024-12-08T11:20:20,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-12-08T11:20:20,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-12-08T11:20:20,751 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-08T11:20:20,751 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0110 sec 2024-12-08T11:20:20,753 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 2.0180 sec 2024-12-08T11:20:20,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:20,838 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T11:20:20,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:20,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:20,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:20,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:20,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:20,838 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:20,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-12-08T11:20:20,840 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-12-08T11:20:20,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/df1283c1e4e040e3a7e86a745005b97e is 50, key is test_row_0/A:col10/1733656820370/Put/seqid=0 2024-12-08T11:20:20,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:20,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741953_1129 (size=14741) 2024-12-08T11:20:20,851 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=547 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/df1283c1e4e040e3a7e86a745005b97e 2024-12-08T11:20:20,852 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-12-08T11:20:20,854 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:20,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-08T11:20:20,855 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:20,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:20,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/92c5b19423824a439ca79bfaf7de2404 is 50, key is test_row_0/B:col10/1733656820370/Put/seqid=0 2024-12-08T11:20:20,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741954_1130 (size=12301) 2024-12-08T11:20:20,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=547 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/92c5b19423824a439ca79bfaf7de2404 2024-12-08T11:20:20,909 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/926c59da62ae40819b7415f2c70d90aa is 50, key is test_row_0/C:col10/1733656820370/Put/seqid=0 2024-12-08T11:20:20,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741955_1131 (size=12301) 2024-12-08T11:20:20,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656880938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-08T11:20:20,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42464 deadline: 1733656880984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42400 deadline: 1733656880985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,988 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42438 deadline: 1733656880988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:20,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:20,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42436 deadline: 1733656880993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:21,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:21,020 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-08T11:20:21,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:21,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:21,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:21,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:21,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:21,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:21,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:21,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656881041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:21,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-08T11:20:21,173 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:21,173 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-08T11:20:21,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:21,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:21,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:21,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
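The run of RegionTooBusyException warnings above shows client mutations being rejected while the memstore of region 64a616eb95ce0fb49283d502a9d694a3 sits over its 512.0 K blocking limit and the flush is still draining it. Below is a minimal client-side sketch of how a writer could absorb that rejection with backoff; the table, row, and column names mirror the log, but the retry loop and backoff values are illustrative assumptions rather than the AcidGuaranteesTestTool's actual code, and in practice the HBase client's own retry layer may surface the exception wrapped instead of directly.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Same row/family/qualifier shape as the test data seen in the log (test_row_0/A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 50; // illustrative starting backoff, not taken from the test
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put); // may be rejected while the region is over the memstore blocking limit
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);              // give the in-flight flush time to drain the memstore
          backoffMs = Math.min(backoffMs * 2, 2000);
        }
      }
      throw new IOException("region stayed busy after retries");
    }
  }
}
```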
2024-12-08T11:20:21,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:21,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:21,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:21,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:42450 deadline: 1733656881243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:21,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=547 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/926c59da62ae40819b7415f2c70d90aa 2024-12-08T11:20:21,327 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:21,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/df1283c1e4e040e3a7e86a745005b97e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/df1283c1e4e040e3a7e86a745005b97e 2024-12-08T11:20:21,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-08T11:20:21,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:21,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. as already flushing 2024-12-08T11:20:21,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:21,328 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:21,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:21,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:21,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/df1283c1e4e040e3a7e86a745005b97e, entries=200, sequenceid=547, filesize=14.4 K 2024-12-08T11:20:21,335 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/92c5b19423824a439ca79bfaf7de2404 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/92c5b19423824a439ca79bfaf7de2404 2024-12-08T11:20:21,340 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/92c5b19423824a439ca79bfaf7de2404, entries=150, sequenceid=547, filesize=12.0 K 2024-12-08T11:20:21,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/926c59da62ae40819b7415f2c70d90aa as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/926c59da62ae40819b7415f2c70d90aa 2024-12-08T11:20:21,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/926c59da62ae40819b7415f2c70d90aa, entries=150, sequenceid=547, filesize=12.0 K 2024-12-08T11:20:21,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 64a616eb95ce0fb49283d502a9d694a3 in 511ms, sequenceid=547, compaction requested=true 2024-12-08T11:20:21,348 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:21,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:21,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:21,348 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:21,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 64a616eb95ce0fb49283d502a9d694a3:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:21,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:21,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
64a616eb95ce0fb49283d502a9d694a3:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:21,348 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:21,348 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:21,349 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40569 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:21,350 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/A is initiating minor compaction (all files) 2024-12-08T11:20:21,350 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/A in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:21,350 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:21,350 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/c6cecefb6ecc4269963616ef94e43f2a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/b0b52ede850440819e57d380930380fd, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/df1283c1e4e040e3a7e86a745005b97e] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=39.6 K 2024-12-08T11:20:21,350 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/B is initiating minor compaction (all files) 2024-12-08T11:20:21,350 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/B in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:21,350 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/da126791633d49488be16f1c0a65f462, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0faddf87818a465eb4b31cfb3d48ce25, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/92c5b19423824a439ca79bfaf7de2404] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=37.2 K 2024-12-08T11:20:21,350 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting c6cecefb6ecc4269963616ef94e43f2a, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=506, earliestPutTs=1733656818585 2024-12-08T11:20:21,352 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting da126791633d49488be16f1c0a65f462, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=506, earliestPutTs=1733656818585 2024-12-08T11:20:21,352 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0b52ede850440819e57d380930380fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1733656819234 2024-12-08T11:20:21,352 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 0faddf87818a465eb4b31cfb3d48ce25, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1733656819234 2024-12-08T11:20:21,353 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 92c5b19423824a439ca79bfaf7de2404, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=547, earliestPutTs=1733656820368 2024-12-08T11:20:21,353 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting df1283c1e4e040e3a7e86a745005b97e, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=547, earliestPutTs=1733656820368 2024-12-08T11:20:21,383 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#B#compaction#117 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:21,384 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/eafe6ad5b39f44e1990a2214fb23da16 is 50, key is test_row_0/B:col10/1733656820370/Put/seqid=0 2024-12-08T11:20:21,385 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#A#compaction#118 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:21,386 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/2beab166d21a4dc895ed27dcf896cd35 is 50, key is test_row_0/A:col10/1733656820370/Put/seqid=0 2024-12-08T11:20:21,391 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x527c6d40 to 127.0.0.1:63801 2024-12-08T11:20:21,391 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:63801 2024-12-08T11:20:21,391 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10c964e8 to 127.0.0.1:63801 2024-12-08T11:20:21,391 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:21,391 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:21,391 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:21,392 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:63801 2024-12-08T11:20:21,392 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:21,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741957_1133 (size=13629) 2024-12-08T11:20:21,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741956_1132 (size=13629) 2024-12-08T11:20:21,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-08T11:20:21,481 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:21,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-12-08T11:20:21,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:21,482 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T11:20:21,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:21,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:21,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:21,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:21,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:21,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:21,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/41925acc5cc5437e9f2b8d77091830bf is 50, key is test_row_0/A:col10/1733656820934/Put/seqid=0 2024-12-08T11:20:21,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741958_1134 (size=12301) 2024-12-08T11:20:21,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:21,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
as already flushing 2024-12-08T11:20:21,492 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a8f4734 to 127.0.0.1:63801 2024-12-08T11:20:21,493 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:21,493 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:63801 2024-12-08T11:20:21,493 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:21,494 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5095ba91 to 127.0.0.1:63801 2024-12-08T11:20:21,494 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:21,498 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x62c43377 to 127.0.0.1:63801 2024-12-08T11:20:21,498 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:21,550 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:63801 2024-12-08T11:20:21,550 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:21,811 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/eafe6ad5b39f44e1990a2214fb23da16 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/eafe6ad5b39f44e1990a2214fb23da16 2024-12-08T11:20:21,811 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/2beab166d21a4dc895ed27dcf896cd35 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/2beab166d21a4dc895ed27dcf896cd35 2024-12-08T11:20:21,817 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/B of 64a616eb95ce0fb49283d502a9d694a3 into eafe6ad5b39f44e1990a2214fb23da16(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:21,817 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:21,817 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/B, priority=13, startTime=1733656821348; duration=0sec 2024-12-08T11:20:21,817 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:21,817 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:B 2024-12-08T11:20:21,817 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:21,817 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/A of 64a616eb95ce0fb49283d502a9d694a3 into 2beab166d21a4dc895ed27dcf896cd35(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:21,817 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:21,817 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/A, priority=13, startTime=1733656821348; duration=0sec 2024-12-08T11:20:21,817 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:21,817 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:A 2024-12-08T11:20:21,818 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:21,818 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 64a616eb95ce0fb49283d502a9d694a3/C is initiating minor compaction (all files) 2024-12-08T11:20:21,818 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 64a616eb95ce0fb49283d502a9d694a3/C in TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:21,818 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f95943e849034e339a145c6f0488ac58, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/08cef308b7164a23ab95b37390bc0f36, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/926c59da62ae40819b7415f2c70d90aa] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp, totalSize=37.2 K 2024-12-08T11:20:21,819 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f95943e849034e339a145c6f0488ac58, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=506, earliestPutTs=1733656818585 2024-12-08T11:20:21,819 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 08cef308b7164a23ab95b37390bc0f36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1733656819234 2024-12-08T11:20:21,820 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 926c59da62ae40819b7415f2c70d90aa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=547, earliestPutTs=1733656820368 2024-12-08T11:20:21,826 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 64a616eb95ce0fb49283d502a9d694a3#C#compaction#120 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:21,827 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/a28e0f0c28e74054a413474111653be4 is 50, key is test_row_0/C:col10/1733656820370/Put/seqid=0 2024-12-08T11:20:21,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741959_1135 (size=13629) 2024-12-08T11:20:21,892 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/41925acc5cc5437e9f2b8d77091830bf 2024-12-08T11:20:21,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/466c5c4f9f2b47a5bb494d08efe16c5e is 50, key is test_row_0/B:col10/1733656820934/Put/seqid=0 2024-12-08T11:20:21,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741960_1136 (size=12301) 2024-12-08T11:20:21,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-08T11:20:22,236 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/a28e0f0c28e74054a413474111653be4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a28e0f0c28e74054a413474111653be4 2024-12-08T11:20:22,241 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 64a616eb95ce0fb49283d502a9d694a3/C of 64a616eb95ce0fb49283d502a9d694a3 into a28e0f0c28e74054a413474111653be4(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
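The compaction entries above show the ExploringCompactionPolicy selecting all three eligible HFiles per store (A, B, then C) for a minor compaction once the latest flush added a third file, with writes throttled under the 50.00 MB/second PressureAwareThroughputController limit. For reference, a compaction of the same stores can also be requested explicitly through the public Admin API; this is a hedged sketch of that call sequence, not something the test itself is shown doing in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table, Bytes.toBytes("A")); // queue a minor compaction of store A only
      admin.compact(table);                     // or queue compactions for every store in the table
    }
  }
}
```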
2024-12-08T11:20:22,241 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:22,241 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3., storeName=64a616eb95ce0fb49283d502a9d694a3/C, priority=13, startTime=1733656821348; duration=0sec 2024-12-08T11:20:22,242 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:22,242 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 64a616eb95ce0fb49283d502a9d694a3:C 2024-12-08T11:20:22,309 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/466c5c4f9f2b47a5bb494d08efe16c5e 2024-12-08T11:20:22,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/fbd0e94693c048d897e137f6cc121735 is 50, key is test_row_0/C:col10/1733656820934/Put/seqid=0 2024-12-08T11:20:22,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741961_1137 (size=12301) 2024-12-08T11:20:22,722 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/fbd0e94693c048d897e137f6cc121735 2024-12-08T11:20:22,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/41925acc5cc5437e9f2b8d77091830bf as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/41925acc5cc5437e9f2b8d77091830bf 2024-12-08T11:20:22,730 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/41925acc5cc5437e9f2b8d77091830bf, entries=150, sequenceid=569, filesize=12.0 K 2024-12-08T11:20:22,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/466c5c4f9f2b47a5bb494d08efe16c5e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/466c5c4f9f2b47a5bb494d08efe16c5e 2024-12-08T11:20:22,735 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/466c5c4f9f2b47a5bb494d08efe16c5e, entries=150, sequenceid=569, filesize=12.0 K 2024-12-08T11:20:22,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/fbd0e94693c048d897e137f6cc121735 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fbd0e94693c048d897e137f6cc121735 2024-12-08T11:20:22,739 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fbd0e94693c048d897e137f6cc121735, entries=150, sequenceid=569, filesize=12.0 K 2024-12-08T11:20:22,740 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=33.54 KB/34350 for 64a616eb95ce0fb49283d502a9d694a3 in 1258ms, sequenceid=569, compaction requested=false 2024-12-08T11:20:22,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:22,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:22,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35
2024-12-08T11:20:22,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=35
2024-12-08T11:20:22,742 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34
2024-12-08T11:20:22,742 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8860 sec
2024-12-08T11:20:22,744 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 1.8960 sec
2024-12-08T11:20:22,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34
2024-12-08T11:20:22,960 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 91
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 79
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 95
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 98
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5064
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4959
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2233
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6697 rows
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2232
2024-12-08T11:20:22,960 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6696 rows
2024-12-08T11:20:22,960 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-12-08T11:20:22,960 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e67f019 to 127.0.0.1:63801
2024-12-08T11:20:22,960 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-08T11:20:22,964 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-12-08T11:20:22,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-12-08T11:20:22,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-12-08T11:20:22,976 DEBUG 
[PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656822976"}]},"ts":"1733656822976"} 2024-12-08T11:20:22,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-08T11:20:22,977 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T11:20:22,979 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T11:20:22,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T11:20:22,985 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=64a616eb95ce0fb49283d502a9d694a3, UNASSIGN}] 2024-12-08T11:20:22,985 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=38, ppid=37, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=64a616eb95ce0fb49283d502a9d694a3, UNASSIGN 2024-12-08T11:20:22,986 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=64a616eb95ce0fb49283d502a9d694a3, regionState=CLOSING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:22,987 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T11:20:22,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; CloseRegionProcedure 64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:20:23,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-08T11:20:23,142 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:23,143 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(124): Close 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:23,144 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T11:20:23,144 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1681): Closing 64a616eb95ce0fb49283d502a9d694a3, disabling compactions & flushes 2024-12-08T11:20:23,144 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:23,144 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 
2024-12-08T11:20:23,144 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. after waiting 0 ms 2024-12-08T11:20:23,144 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:23,144 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(2837): Flushing 64a616eb95ce0fb49283d502a9d694a3 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T11:20:23,145 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=A 2024-12-08T11:20:23,145 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:23,145 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=B 2024-12-08T11:20:23,145 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:23,145 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 64a616eb95ce0fb49283d502a9d694a3, store=C 2024-12-08T11:20:23,145 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:23,149 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/364372ba3d4e4ff898bbe8c829de459f is 50, key is test_row_0/A:col10/1733656821492/Put/seqid=0 2024-12-08T11:20:23,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741962_1138 (size=12301) 2024-12-08T11:20:23,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-08T11:20:23,553 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=580 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/364372ba3d4e4ff898bbe8c829de459f 2024-12-08T11:20:23,561 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/f96fd2ba6e7d487785cda040c1bb6837 is 50, 
key is test_row_0/B:col10/1733656821492/Put/seqid=0 2024-12-08T11:20:23,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741963_1139 (size=12301) 2024-12-08T11:20:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-08T11:20:23,828 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T11:20:23,966 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=580 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/f96fd2ba6e7d487785cda040c1bb6837 2024-12-08T11:20:23,973 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/306f0946c374491893e007f3ea5ff61f is 50, key is test_row_0/C:col10/1733656821492/Put/seqid=0 2024-12-08T11:20:23,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741964_1140 (size=12301) 2024-12-08T11:20:24,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-12-08T11:20:24,378 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=580 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/306f0946c374491893e007f3ea5ff61f 2024-12-08T11:20:24,383 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/A/364372ba3d4e4ff898bbe8c829de459f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/364372ba3d4e4ff898bbe8c829de459f 2024-12-08T11:20:24,388 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/364372ba3d4e4ff898bbe8c829de459f, entries=150, sequenceid=580, filesize=12.0 K 2024-12-08T11:20:24,389 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/B/f96fd2ba6e7d487785cda040c1bb6837 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f96fd2ba6e7d487785cda040c1bb6837 2024-12-08T11:20:24,393 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f96fd2ba6e7d487785cda040c1bb6837, entries=150, sequenceid=580, filesize=12.0 K 2024-12-08T11:20:24,394 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/.tmp/C/306f0946c374491893e007f3ea5ff61f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/306f0946c374491893e007f3ea5ff61f 2024-12-08T11:20:24,398 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/306f0946c374491893e007f3ea5ff61f, entries=150, sequenceid=580, filesize=12.0 K 2024-12-08T11:20:24,399 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 64a616eb95ce0fb49283d502a9d694a3 in 1255ms, sequenceid=580, compaction requested=true 2024-12-08T11:20:24,400 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/50a6a24f32544d12bb7fe4e53f6b4eaf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/293eff860ed2470398355204ab198652, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/13860169c0734ce2a4fe3b7bfdc2ffbb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7769d199242848e0a7fcf39c20bc7a7e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fc72ebad50c417fade96ad1f982be65, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a925913452d74124bcb0244a96640f3e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bedb38e2df7946a68acc7a18ef3145fe, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/efac7e74ad4f4680894518ce2c5d60db, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/47f217a6f12b497d8b493a491e3f50aa, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6dd5ae5fc8554ea68ce824d77c7f1f11, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6d23ca4781a14b71ad7d137a9d73ee54, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a4da11bc4b734cf89a713e365071ae7e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4e0e99048778416f9a8f56ac9d3879ae, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/1ead987fc16641d5a53b72d55a1f7b99, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fce719e2bf744415bfeb7322ab54dfeb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/10af5c4d365f4be79208df88c0aa95f7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/23f3ccda7b5b4774ae59d5a99442c881, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bc40e21cb69d4ead8631e78df287a613, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fd0b1765deb4eb3baed3bf5f244910a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/923b17581e234b718c6baa22c0cdb6d9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e0de9024f90c4c01a6de493e3e4765c6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/2cf2452e39094e7190245297f7e7d9ee, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/9a4123b9b7dc4716aac5e486981b77a0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7f78fbce47d24f0db6517c92b9a9aa13, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/8bf91cb5c3704eaf83223953cb0e398d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e8284af994874039956e55c871c5c496, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/f6e94a4f3a974b2493ff06af113343d6, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bcddc008914e42f5824c19d2b2ec297c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/933fdf2849a349158e16a7206401bda8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/0d848023e47d4357bdc5ad41d56f5f8a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a1a65910e3cf4eb59148a5bbd24b7574, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc7d5d7dcd60462da6ad8deb8fc84bf9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4b2fcfc4a9144caa875525f11af72aa6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/ac256a2c165341068ba89c758142f2b5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/55cb164cf3064ff6ba86c65308ee7e6b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc9c255a14fa44ce8aab8965c6e42ad6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/c6cecefb6ecc4269963616ef94e43f2a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/b0b52ede850440819e57d380930380fd, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/df1283c1e4e040e3a7e86a745005b97e] to archive 2024-12-08T11:20:24,403 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T11:20:24,408 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/50a6a24f32544d12bb7fe4e53f6b4eaf to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/50a6a24f32544d12bb7fe4e53f6b4eaf 2024-12-08T11:20:24,409 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/293eff860ed2470398355204ab198652 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/293eff860ed2470398355204ab198652 2024-12-08T11:20:24,410 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/13860169c0734ce2a4fe3b7bfdc2ffbb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/13860169c0734ce2a4fe3b7bfdc2ffbb 2024-12-08T11:20:24,412 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7769d199242848e0a7fcf39c20bc7a7e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7769d199242848e0a7fcf39c20bc7a7e 2024-12-08T11:20:24,413 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fc72ebad50c417fade96ad1f982be65 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fc72ebad50c417fade96ad1f982be65 2024-12-08T11:20:24,414 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a925913452d74124bcb0244a96640f3e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a925913452d74124bcb0244a96640f3e 2024-12-08T11:20:24,415 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bedb38e2df7946a68acc7a18ef3145fe to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bedb38e2df7946a68acc7a18ef3145fe 2024-12-08T11:20:24,417 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/efac7e74ad4f4680894518ce2c5d60db to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/efac7e74ad4f4680894518ce2c5d60db 2024-12-08T11:20:24,418 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/47f217a6f12b497d8b493a491e3f50aa to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/47f217a6f12b497d8b493a491e3f50aa 2024-12-08T11:20:24,419 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6dd5ae5fc8554ea68ce824d77c7f1f11 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6dd5ae5fc8554ea68ce824d77c7f1f11 2024-12-08T11:20:24,421 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6d23ca4781a14b71ad7d137a9d73ee54 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/6d23ca4781a14b71ad7d137a9d73ee54 2024-12-08T11:20:24,422 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a4da11bc4b734cf89a713e365071ae7e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a4da11bc4b734cf89a713e365071ae7e 2024-12-08T11:20:24,423 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4e0e99048778416f9a8f56ac9d3879ae to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4e0e99048778416f9a8f56ac9d3879ae 2024-12-08T11:20:24,425 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/1ead987fc16641d5a53b72d55a1f7b99 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/1ead987fc16641d5a53b72d55a1f7b99 2024-12-08T11:20:24,426 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fce719e2bf744415bfeb7322ab54dfeb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fce719e2bf744415bfeb7322ab54dfeb 2024-12-08T11:20:24,428 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/10af5c4d365f4be79208df88c0aa95f7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/10af5c4d365f4be79208df88c0aa95f7 2024-12-08T11:20:24,429 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/23f3ccda7b5b4774ae59d5a99442c881 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/23f3ccda7b5b4774ae59d5a99442c881 2024-12-08T11:20:24,430 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bc40e21cb69d4ead8631e78df287a613 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bc40e21cb69d4ead8631e78df287a613 2024-12-08T11:20:24,431 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fd0b1765deb4eb3baed3bf5f244910a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7fd0b1765deb4eb3baed3bf5f244910a 2024-12-08T11:20:24,433 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/923b17581e234b718c6baa22c0cdb6d9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/923b17581e234b718c6baa22c0cdb6d9 2024-12-08T11:20:24,434 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e0de9024f90c4c01a6de493e3e4765c6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e0de9024f90c4c01a6de493e3e4765c6 2024-12-08T11:20:24,435 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/2cf2452e39094e7190245297f7e7d9ee to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/2cf2452e39094e7190245297f7e7d9ee 2024-12-08T11:20:24,437 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/9a4123b9b7dc4716aac5e486981b77a0 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/9a4123b9b7dc4716aac5e486981b77a0 2024-12-08T11:20:24,438 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7f78fbce47d24f0db6517c92b9a9aa13 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/7f78fbce47d24f0db6517c92b9a9aa13 2024-12-08T11:20:24,439 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/8bf91cb5c3704eaf83223953cb0e398d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/8bf91cb5c3704eaf83223953cb0e398d 2024-12-08T11:20:24,440 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e8284af994874039956e55c871c5c496 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/e8284af994874039956e55c871c5c496 2024-12-08T11:20:24,441 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/f6e94a4f3a974b2493ff06af113343d6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/f6e94a4f3a974b2493ff06af113343d6 2024-12-08T11:20:24,443 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bcddc008914e42f5824c19d2b2ec297c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/bcddc008914e42f5824c19d2b2ec297c 2024-12-08T11:20:24,444 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/933fdf2849a349158e16a7206401bda8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/933fdf2849a349158e16a7206401bda8 2024-12-08T11:20:24,445 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/0d848023e47d4357bdc5ad41d56f5f8a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/0d848023e47d4357bdc5ad41d56f5f8a 2024-12-08T11:20:24,446 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a1a65910e3cf4eb59148a5bbd24b7574 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/a1a65910e3cf4eb59148a5bbd24b7574 2024-12-08T11:20:24,447 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc7d5d7dcd60462da6ad8deb8fc84bf9 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc7d5d7dcd60462da6ad8deb8fc84bf9 2024-12-08T11:20:24,449 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4b2fcfc4a9144caa875525f11af72aa6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/4b2fcfc4a9144caa875525f11af72aa6 2024-12-08T11:20:24,450 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/ac256a2c165341068ba89c758142f2b5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/ac256a2c165341068ba89c758142f2b5 2024-12-08T11:20:24,451 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/55cb164cf3064ff6ba86c65308ee7e6b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/55cb164cf3064ff6ba86c65308ee7e6b 2024-12-08T11:20:24,452 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc9c255a14fa44ce8aab8965c6e42ad6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/fc9c255a14fa44ce8aab8965c6e42ad6 2024-12-08T11:20:24,454 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/c6cecefb6ecc4269963616ef94e43f2a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/c6cecefb6ecc4269963616ef94e43f2a 2024-12-08T11:20:24,455 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/b0b52ede850440819e57d380930380fd to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/b0b52ede850440819e57d380930380fd 2024-12-08T11:20:24,456 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/df1283c1e4e040e3a7e86a745005b97e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/df1283c1e4e040e3a7e86a745005b97e 2024-12-08T11:20:24,472 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/099ac04d3d7d429bb2d70c99b843ca5c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/d8cc6aae67124cdfa7070dc54112adea, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f374c8961859440fbeee7e5996b8db06, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/80e8427d291b400b893d3e0be2651874, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b90485eb48174693b2ea162d84902077, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bf8a1971bfce4a7ea8c58d8d11e346a7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/930aa1e4e2194cb18648930248847067, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7f641b9e61d94a95aad4e03db73bbb34, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3d124f7e89f14e4f9f32ead41ec7f4d2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/fd2e13716169465dbff2182404c9e864, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/cb304774268a4393ab85fece6c78cf1b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bb1fd38f3f994c6f8008c79a3fb4ded4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/ac9ff35427094a83921aba92f7d2ea1f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0c918b4b206f43edbc8a6b0966bd2139, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/29680b311e644785ab5a57f4390faefa, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/a4a0d0f099124354ae9f9a8fa5490e2b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/be33ef0c7d874b5ab2a218ce53453c7f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/18512f634e0547a5b90e55685d5cfaf8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/66a2a00c6a6648749353a496eafda6ba, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b5c48dbb004844e786c0e41874fa6657, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/98cac4abf20b43ffa24dc49a822f0769, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/030ac886b3264b8daa20204b57ab8af1, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3f12321c41fc43bc994255f01b35c178, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/70ed2c5e061c469e9427ff23720f2c25, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/392fba09ecbf4fbbb3181724dba3cc3c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b8a2a45b9f084398b186dd30294704e2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0445a237159046b1b22afa02c398e09a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/adaaaca5e2a34b5a8dd1b6a539924052, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/84fb3a79993844cc87196896be70b664, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/46379948e34349b3aeead14519396bcc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/af9d2367fd0045c190886c61ec66ab25, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7d2ead286b5048c8b5b4747b3bb2adc6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/2c18a5523c5d469e880b4e5e46c1727f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7adbf2671a6c47389a7129985326fef7, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/e29bd90b62604c2783fbcdc287241754, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/da126791633d49488be16f1c0a65f462, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f450aa1af7e94077b550524608dc1784, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0faddf87818a465eb4b31cfb3d48ce25, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/92c5b19423824a439ca79bfaf7de2404] to archive 2024-12-08T11:20:24,473 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:20:24,475 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/099ac04d3d7d429bb2d70c99b843ca5c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/099ac04d3d7d429bb2d70c99b843ca5c 2024-12-08T11:20:24,476 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/d8cc6aae67124cdfa7070dc54112adea to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/d8cc6aae67124cdfa7070dc54112adea 2024-12-08T11:20:24,477 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f374c8961859440fbeee7e5996b8db06 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f374c8961859440fbeee7e5996b8db06 2024-12-08T11:20:24,479 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/80e8427d291b400b893d3e0be2651874 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/80e8427d291b400b893d3e0be2651874 2024-12-08T11:20:24,480 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b90485eb48174693b2ea162d84902077 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b90485eb48174693b2ea162d84902077 2024-12-08T11:20:24,481 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bf8a1971bfce4a7ea8c58d8d11e346a7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bf8a1971bfce4a7ea8c58d8d11e346a7 2024-12-08T11:20:24,482 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/930aa1e4e2194cb18648930248847067 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/930aa1e4e2194cb18648930248847067 2024-12-08T11:20:24,483 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7f641b9e61d94a95aad4e03db73bbb34 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7f641b9e61d94a95aad4e03db73bbb34 2024-12-08T11:20:24,485 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3d124f7e89f14e4f9f32ead41ec7f4d2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3d124f7e89f14e4f9f32ead41ec7f4d2 2024-12-08T11:20:24,486 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/fd2e13716169465dbff2182404c9e864 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/fd2e13716169465dbff2182404c9e864 2024-12-08T11:20:24,487 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/cb304774268a4393ab85fece6c78cf1b to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/cb304774268a4393ab85fece6c78cf1b 2024-12-08T11:20:24,488 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bb1fd38f3f994c6f8008c79a3fb4ded4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/bb1fd38f3f994c6f8008c79a3fb4ded4 2024-12-08T11:20:24,490 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/ac9ff35427094a83921aba92f7d2ea1f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/ac9ff35427094a83921aba92f7d2ea1f 2024-12-08T11:20:24,491 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0c918b4b206f43edbc8a6b0966bd2139 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0c918b4b206f43edbc8a6b0966bd2139 2024-12-08T11:20:24,492 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/29680b311e644785ab5a57f4390faefa to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/29680b311e644785ab5a57f4390faefa 2024-12-08T11:20:24,493 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/a4a0d0f099124354ae9f9a8fa5490e2b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/a4a0d0f099124354ae9f9a8fa5490e2b 2024-12-08T11:20:24,494 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/be33ef0c7d874b5ab2a218ce53453c7f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/be33ef0c7d874b5ab2a218ce53453c7f 2024-12-08T11:20:24,496 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/18512f634e0547a5b90e55685d5cfaf8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/18512f634e0547a5b90e55685d5cfaf8 2024-12-08T11:20:24,497 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/66a2a00c6a6648749353a496eafda6ba to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/66a2a00c6a6648749353a496eafda6ba 2024-12-08T11:20:24,498 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b5c48dbb004844e786c0e41874fa6657 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b5c48dbb004844e786c0e41874fa6657 2024-12-08T11:20:24,499 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/98cac4abf20b43ffa24dc49a822f0769 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/98cac4abf20b43ffa24dc49a822f0769 2024-12-08T11:20:24,500 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/030ac886b3264b8daa20204b57ab8af1 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/030ac886b3264b8daa20204b57ab8af1 2024-12-08T11:20:24,501 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3f12321c41fc43bc994255f01b35c178 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/3f12321c41fc43bc994255f01b35c178 2024-12-08T11:20:24,502 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/70ed2c5e061c469e9427ff23720f2c25 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/70ed2c5e061c469e9427ff23720f2c25 2024-12-08T11:20:24,503 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/392fba09ecbf4fbbb3181724dba3cc3c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/392fba09ecbf4fbbb3181724dba3cc3c 2024-12-08T11:20:24,505 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b8a2a45b9f084398b186dd30294704e2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/b8a2a45b9f084398b186dd30294704e2 2024-12-08T11:20:24,506 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0445a237159046b1b22afa02c398e09a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0445a237159046b1b22afa02c398e09a 2024-12-08T11:20:24,507 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/adaaaca5e2a34b5a8dd1b6a539924052 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/adaaaca5e2a34b5a8dd1b6a539924052 2024-12-08T11:20:24,508 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/84fb3a79993844cc87196896be70b664 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/84fb3a79993844cc87196896be70b664 2024-12-08T11:20:24,509 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/46379948e34349b3aeead14519396bcc to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/46379948e34349b3aeead14519396bcc 2024-12-08T11:20:24,510 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/af9d2367fd0045c190886c61ec66ab25 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/af9d2367fd0045c190886c61ec66ab25 2024-12-08T11:20:24,511 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7d2ead286b5048c8b5b4747b3bb2adc6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7d2ead286b5048c8b5b4747b3bb2adc6 2024-12-08T11:20:24,512 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/2c18a5523c5d469e880b4e5e46c1727f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/2c18a5523c5d469e880b4e5e46c1727f 2024-12-08T11:20:24,513 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7adbf2671a6c47389a7129985326fef7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/7adbf2671a6c47389a7129985326fef7 2024-12-08T11:20:24,515 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/e29bd90b62604c2783fbcdc287241754 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/e29bd90b62604c2783fbcdc287241754 2024-12-08T11:20:24,516 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/da126791633d49488be16f1c0a65f462 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/da126791633d49488be16f1c0a65f462 2024-12-08T11:20:24,517 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f450aa1af7e94077b550524608dc1784 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f450aa1af7e94077b550524608dc1784 2024-12-08T11:20:24,518 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0faddf87818a465eb4b31cfb3d48ce25 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/0faddf87818a465eb4b31cfb3d48ce25 2024-12-08T11:20:24,519 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/92c5b19423824a439ca79bfaf7de2404 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/92c5b19423824a439ca79bfaf7de2404 2024-12-08T11:20:24,521 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/34f1408c07224d0f990c8641fbb5c94d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9c943a7ea018451abdebd231b3d42351, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/27810acbe6174030b6259bb75cc2523e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ce394f6df090471c93b8143dccd9b9dc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c79db04aca494941b4a086f366ea8ea2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/e43786be13504d1991302eb944caba07, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fb70e39fb9dc4b2c9abf5ceaae7f8c9a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/8bcbd0fa20d34245ac90026d9829d8ad, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/77ef602f9d2d4ee2aaf7d99e011f5445, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/201fd910304e4cf18211e19120174e59, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c86d5bd7027f4bb786aa32c7654e76bb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9602a33cdbe343f991644f264ca0b151, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/d8b6fd1e36df4c8589cc5d034134b82c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/26494f189ae94873a96f45f1f5519e7f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a323098ff7bc48ddab81d666e7efcc2f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/b2b60fca242640df8c1180ce867fdaba, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/695e850de61d4c329c2ae1f680f2edef, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1dbe2c91838949269bd3c7a13b4f1e22, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/cb87fc78e78f46cb8aea3cece2be5d70, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6578d626933b4451b8324c44ee9ae071, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/61a7eac286444630bd337baa9ae886a6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6879cb6790e94bf8822d70edeba59a8a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/7183f89cbe724cd18be8876d42fc817b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ec0e5559cdbb40aa96fabeff1765d925, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6e7d0dd93a79414ba1acbc53b7e40076, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/58fa07a08cf346339fb76a7a35f1664a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c294cb9563d04b858d3f3df342f2ecfb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/5f93055419f046138e6f498d526bfff6, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/90d4d8485724487b8d7f0417dea92ce4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a3c71ccf51794b938d2e1ff12f4f3c70, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f2323fa6d74f4965974cb84ced38e64c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1ab145cf7b7840e69b2aea0539f2f7ad, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/0e2c90c4aa2b444db09e7d0b5c07ab06, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2659fd4862d743088571e42d9a9040e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/10ef86b590e44db285ce07c5774c0905, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f95943e849034e339a145c6f0488ac58, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2bdfb1e3029a49639346ba244f0f7a01, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/08cef308b7164a23ab95b37390bc0f36, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/926c59da62ae40819b7415f2c70d90aa] to archive 2024-12-08T11:20:24,522 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
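The StoreCloser entries above and below move each compacted HFile from the region's data/ directory to the mirrored location under archive/. As a minimal sketch of that path mapping only, using the generic Hadoop FileSystem API rather than HBase's actual HFileArchiver (which additionally handles name collisions, retries, and bulk deletes), something like the following could reproduce one of the moves; the class name is made up, and the root URI, region, family, and file name are copied from the log purely for illustration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
    // Cluster root exactly as it appears in the log; illustrative only.
    private static final String ROOT =
        "hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c";

    // Move one store file from data/... to the mirrored archive/... location.
    static void archiveStoreFile(FileSystem fs, String table, String encodedRegion,
                                 String family, String fileName) throws IOException {
        Path src = new Path(ROOT + "/data/default/" + table + "/" + encodedRegion
            + "/" + family + "/" + fileName);
        Path dstDir = new Path(ROOT + "/archive/data/default/" + table + "/" + encodedRegion
            + "/" + family);
        fs.mkdirs(dstDir);                      // ensure the archive directory exists
        Path dst = new Path(dstDir, fileName);  // keep the same file name under archive/
        if (!fs.rename(src, dst)) {             // on HDFS a rename is a metadata-only move
            throw new IOException("could not archive " + src + " to " + dst);
        }
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = new Path(ROOT).getFileSystem(conf);  // bind to the hdfs:// root above
        archiveStoreFile(fs, "TestAcidGuarantees", "64a616eb95ce0fb49283d502a9d694a3",
            "C", "34f1408c07224d0f990c8641fbb5c94d");
    }
}

Because the rename stays within one HDFS namespace it is a metadata-only operation, which is consistent with each archive entry above completing in roughly a millisecond.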
2024-12-08T11:20:24,524 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/34f1408c07224d0f990c8641fbb5c94d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/34f1408c07224d0f990c8641fbb5c94d 2024-12-08T11:20:24,525 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9c943a7ea018451abdebd231b3d42351 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9c943a7ea018451abdebd231b3d42351 2024-12-08T11:20:24,526 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/27810acbe6174030b6259bb75cc2523e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/27810acbe6174030b6259bb75cc2523e 2024-12-08T11:20:24,527 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ce394f6df090471c93b8143dccd9b9dc to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ce394f6df090471c93b8143dccd9b9dc 2024-12-08T11:20:24,528 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c79db04aca494941b4a086f366ea8ea2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c79db04aca494941b4a086f366ea8ea2 2024-12-08T11:20:24,529 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/e43786be13504d1991302eb944caba07 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/e43786be13504d1991302eb944caba07 2024-12-08T11:20:24,530 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fb70e39fb9dc4b2c9abf5ceaae7f8c9a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fb70e39fb9dc4b2c9abf5ceaae7f8c9a 2024-12-08T11:20:24,532 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/8bcbd0fa20d34245ac90026d9829d8ad to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/8bcbd0fa20d34245ac90026d9829d8ad 2024-12-08T11:20:24,533 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/77ef602f9d2d4ee2aaf7d99e011f5445 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/77ef602f9d2d4ee2aaf7d99e011f5445 2024-12-08T11:20:24,534 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/201fd910304e4cf18211e19120174e59 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/201fd910304e4cf18211e19120174e59 2024-12-08T11:20:24,535 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c86d5bd7027f4bb786aa32c7654e76bb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c86d5bd7027f4bb786aa32c7654e76bb 2024-12-08T11:20:24,537 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9602a33cdbe343f991644f264ca0b151 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/9602a33cdbe343f991644f264ca0b151 2024-12-08T11:20:24,538 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/d8b6fd1e36df4c8589cc5d034134b82c to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/d8b6fd1e36df4c8589cc5d034134b82c 2024-12-08T11:20:24,539 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/26494f189ae94873a96f45f1f5519e7f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/26494f189ae94873a96f45f1f5519e7f 2024-12-08T11:20:24,540 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a323098ff7bc48ddab81d666e7efcc2f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a323098ff7bc48ddab81d666e7efcc2f 2024-12-08T11:20:24,542 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/b2b60fca242640df8c1180ce867fdaba to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/b2b60fca242640df8c1180ce867fdaba 2024-12-08T11:20:24,543 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/695e850de61d4c329c2ae1f680f2edef to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/695e850de61d4c329c2ae1f680f2edef 2024-12-08T11:20:24,544 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1dbe2c91838949269bd3c7a13b4f1e22 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1dbe2c91838949269bd3c7a13b4f1e22 2024-12-08T11:20:24,545 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/cb87fc78e78f46cb8aea3cece2be5d70 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/cb87fc78e78f46cb8aea3cece2be5d70 2024-12-08T11:20:24,546 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6578d626933b4451b8324c44ee9ae071 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6578d626933b4451b8324c44ee9ae071 2024-12-08T11:20:24,547 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/61a7eac286444630bd337baa9ae886a6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/61a7eac286444630bd337baa9ae886a6 2024-12-08T11:20:24,548 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6879cb6790e94bf8822d70edeba59a8a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6879cb6790e94bf8822d70edeba59a8a 2024-12-08T11:20:24,549 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/7183f89cbe724cd18be8876d42fc817b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/7183f89cbe724cd18be8876d42fc817b 2024-12-08T11:20:24,550 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ec0e5559cdbb40aa96fabeff1765d925 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/ec0e5559cdbb40aa96fabeff1765d925 2024-12-08T11:20:24,552 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6e7d0dd93a79414ba1acbc53b7e40076 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/6e7d0dd93a79414ba1acbc53b7e40076 2024-12-08T11:20:24,553 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/58fa07a08cf346339fb76a7a35f1664a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/58fa07a08cf346339fb76a7a35f1664a 2024-12-08T11:20:24,554 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c294cb9563d04b858d3f3df342f2ecfb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/c294cb9563d04b858d3f3df342f2ecfb 2024-12-08T11:20:24,556 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/5f93055419f046138e6f498d526bfff6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/5f93055419f046138e6f498d526bfff6 2024-12-08T11:20:24,557 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/90d4d8485724487b8d7f0417dea92ce4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/90d4d8485724487b8d7f0417dea92ce4 2024-12-08T11:20:24,559 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a3c71ccf51794b938d2e1ff12f4f3c70 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a3c71ccf51794b938d2e1ff12f4f3c70 2024-12-08T11:20:24,560 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f2323fa6d74f4965974cb84ced38e64c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f2323fa6d74f4965974cb84ced38e64c 2024-12-08T11:20:24,561 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1ab145cf7b7840e69b2aea0539f2f7ad to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/1ab145cf7b7840e69b2aea0539f2f7ad 2024-12-08T11:20:24,562 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/0e2c90c4aa2b444db09e7d0b5c07ab06 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/0e2c90c4aa2b444db09e7d0b5c07ab06 2024-12-08T11:20:24,563 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2659fd4862d743088571e42d9a9040e8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2659fd4862d743088571e42d9a9040e8 2024-12-08T11:20:24,564 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/10ef86b590e44db285ce07c5774c0905 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/10ef86b590e44db285ce07c5774c0905 2024-12-08T11:20:24,565 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f95943e849034e339a145c6f0488ac58 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/f95943e849034e339a145c6f0488ac58 2024-12-08T11:20:24,566 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2bdfb1e3029a49639346ba244f0f7a01 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/2bdfb1e3029a49639346ba244f0f7a01 2024-12-08T11:20:24,568 DEBUG [StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/08cef308b7164a23ab95b37390bc0f36 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/08cef308b7164a23ab95b37390bc0f36 2024-12-08T11:20:24,569 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/926c59da62ae40819b7415f2c70d90aa to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/926c59da62ae40819b7415f2c70d90aa 2024-12-08T11:20:24,573 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/recovered.edits/583.seqid, newMaxSeqId=583, maxSeqId=1 2024-12-08T11:20:24,576 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3. 2024-12-08T11:20:24,576 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] regionserver.HRegion(1635): Region close journal for 64a616eb95ce0fb49283d502a9d694a3: 2024-12-08T11:20:24,577 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=39}] handler.UnassignRegionHandler(170): Closed 64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:24,578 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=38 updating hbase:meta row=64a616eb95ce0fb49283d502a9d694a3, regionState=CLOSED 2024-12-08T11:20:24,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-12-08T11:20:24,580 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseRegionProcedure 64a616eb95ce0fb49283d502a9d694a3, server=355ef6e50110,46083,1733656795491 in 1.5920 sec 2024-12-08T11:20:24,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=37 2024-12-08T11:20:24,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=37, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=64a616eb95ce0fb49283d502a9d694a3, UNASSIGN in 1.5950 sec 2024-12-08T11:20:24,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-08T11:20:24,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6010 sec 2024-12-08T11:20:24,584 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656824584"}]},"ts":"1733656824584"} 2024-12-08T11:20:24,585 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T11:20:24,587 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T11:20:24,588 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6160 sec 2024-12-08T11:20:25,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=36 2024-12-08T11:20:25,081 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-12-08T11:20:25,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T11:20:25,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:25,089 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=40, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:25,090 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=40, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:25,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-08T11:20:25,093 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:25,097 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/recovered.edits] 2024-12-08T11:20:25,100 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/2beab166d21a4dc895ed27dcf896cd35 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/2beab166d21a4dc895ed27dcf896cd35 2024-12-08T11:20:25,101 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/364372ba3d4e4ff898bbe8c829de459f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/364372ba3d4e4ff898bbe8c829de459f 2024-12-08T11:20:25,103 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/41925acc5cc5437e9f2b8d77091830bf to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/A/41925acc5cc5437e9f2b8d77091830bf 2024-12-08T11:20:25,105 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/466c5c4f9f2b47a5bb494d08efe16c5e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/466c5c4f9f2b47a5bb494d08efe16c5e 2024-12-08T11:20:25,107 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/eafe6ad5b39f44e1990a2214fb23da16 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/eafe6ad5b39f44e1990a2214fb23da16 2024-12-08T11:20:25,108 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f96fd2ba6e7d487785cda040c1bb6837 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/B/f96fd2ba6e7d487785cda040c1bb6837 2024-12-08T11:20:25,110 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/306f0946c374491893e007f3ea5ff61f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/306f0946c374491893e007f3ea5ff61f 2024-12-08T11:20:25,111 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a28e0f0c28e74054a413474111653be4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/a28e0f0c28e74054a413474111653be4 2024-12-08T11:20:25,112 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fbd0e94693c048d897e137f6cc121735 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/C/fbd0e94693c048d897e137f6cc121735 2024-12-08T11:20:25,115 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/recovered.edits/583.seqid to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3/recovered.edits/583.seqid 
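The DISABLE operation reported as completed above (procId 36) and the delete request that follows it (procId 40) correspond to the standard HBase client Admin calls. A minimal client-side sketch, assuming only a reachable cluster configuration on the classpath and not the test's own helper code, would be:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        // HBaseConfiguration.create() picks up hbase-site.xml from the classpath.
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            if (admin.isTableEnabled(tn)) {
                admin.disableTable(tn);  // server side this runs a DisableTableProcedure (pid=36 above)
            }
            admin.deleteTable(tn);       // and this a DeleteTableProcedure (pid=40 above)
        }
    }
}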
2024-12-08T11:20:25,115 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/64a616eb95ce0fb49283d502a9d694a3 2024-12-08T11:20:25,115 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T11:20:25,121 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=40, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:25,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-08T11:20:25,128 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T11:20:25,160 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-08T11:20:25,161 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=40, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:25,161 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T11:20:25,161 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733656825161"}]},"ts":"9223372036854775807"} 2024-12-08T11:20:25,165 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T11:20:25,165 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 64a616eb95ce0fb49283d502a9d694a3, NAME => 'TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T11:20:25,165 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
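The two meta mutations logged around this point, the region info row delete just above and the table state delete in the next entry, can be reconstructed as ordinary Delete objects purely to illustrate their shape; the master's DeleteTableProcedure issues them internally during DELETE_TABLE_REMOVE_FROM_META, and clients should never modify hbase:meta themselves. The class name and the printed output are illustrative only:

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaDeleteShapeSketch {
    public static void main(String[] args) {
        // Region row key exactly as logged: <table>,<startkey>,<regionid>.<encoded name>.
        Delete regionInfo = new Delete(Bytes.toBytes(
                "TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3."))
            .addFamily(Bytes.toBytes("info"));                 // drops every info:* cell for the region row

        // Table state row: the bare table name, column table:state.
        Delete tableState = new Delete(Bytes.toBytes("TestAcidGuarantees"))
            .addColumns(Bytes.toBytes("table"), Bytes.toBytes("state"));

        System.out.println(regionInfo);  // prints a summary similar in shape to the log entries here
        System.out.println(tableState);
    }
}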
2024-12-08T11:20:25,165 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733656825165"}]},"ts":"9223372036854775807"} 2024-12-08T11:20:25,167 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T11:20:25,170 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=40, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:25,171 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 86 msec 2024-12-08T11:20:25,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=40 2024-12-08T11:20:25,191 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 40 completed 2024-12-08T11:20:25,202 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=239 (was 219) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1577542464_22 at /127.0.0.1:43252 [Waiting for operation #402] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/355ef6e50110:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x15cb16c5-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;355ef6e50110:46083-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-858952241_22 at /127.0.0.1:43370 [Waiting for operation #392] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x15cb16c5-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x15cb16c5-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: hconnection-0x15cb16c5-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=455 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=435 (was 242) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6513 (was 7432) 2024-12-08T11:20:25,211 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=435, ProcessCount=11, AvailableMemoryMB=6512 2024-12-08T11:20:25,212 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
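The TableDescriptorChecker warning fires because the descriptor sent with the create request that follows (or the site configuration) sets the memstore flush size to 131072 bytes (128 KB), far below the usual 128 MB default, so the checker flags it as likely to cause very frequent flushing; for an ACID-guarantees test that extra flush pressure is presumably intentional. A hedged sketch of how such a flush size would be expressed on a descriptor (illustrative, not the test's code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushSizeSketch {
  public static void main(String[] args) {
    // A 131072-byte MEMSTORE_FLUSHSIZE like the one the checker warns about.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(128 * 1024L)
        .build();
    System.out.println(td.getMemStoreFlushSize()); // prints 131072
  }
}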
2024-12-08T11:20:25,213 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:20:25,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:25,215 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T11:20:25,215 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:25,215 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 41 2024-12-08T11:20:25,216 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T11:20:25,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-08T11:20:25,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741965_1141 (size=963) 2024-12-08T11:20:25,235 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-08T11:20:25,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-08T11:20:25,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-08T11:20:25,624 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c 2024-12-08T11:20:25,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741966_1142 (size=53) 2024-12-08T11:20:25,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-08T11:20:26,031 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:20:26,031 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing e39fb8694c3bacc24718f77e81d6379f, disabling compactions & flushes 2024-12-08T11:20:26,031 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:26,031 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:26,031 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. after waiting 0 ms 2024-12-08T11:20:26,031 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:26,031 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
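The descriptor logged with the create request carries the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three identical families A, B and C with VERSIONS => '1' and a 64 KB block size. Building that shape from a client looks roughly like the sketch below (illustrative, not the test's helper code; only the attributes visible in the log are set, everything else is left at its defaults):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTableSketch {
  public static void main(String[] args) throws Exception {
    TableDescriptorBuilder table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // same table-level metadata as in the logged descriptor
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] { "A", "B", "C" }) {
      table.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)       // VERSIONS => '1'
          .setBlocksize(64 * 1024) // BLOCKSIZE => '65536'
          .build());
    }
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(table.build()); // drives a CreateTableProcedure like pid=41
    }
  }
}

The CreateTableProcedure states logged before and after this point (WRITE_FS_LAYOUT, ADD_TO_META, ASSIGN_REGIONS, UPDATE_DESC_CACHE, POST_OPERATION) are the server-side result of that single createTable call.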
2024-12-08T11:20:26,031 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:26,033 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T11:20:26,033 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733656826033"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733656826033"}]},"ts":"1733656826033"} 2024-12-08T11:20:26,034 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T11:20:26,035 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T11:20:26,035 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656826035"}]},"ts":"1733656826035"} 2024-12-08T11:20:26,036 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T11:20:26,043 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, ASSIGN}] 2024-12-08T11:20:26,043 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, ASSIGN 2024-12-08T11:20:26,044 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, ASSIGN; state=OFFLINE, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=false 2024-12-08T11:20:26,195 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=e39fb8694c3bacc24718f77e81d6379f, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:26,196 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=43, ppid=42, state=RUNNABLE; OpenRegionProcedure e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:20:26,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-08T11:20:26,348 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:26,351 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:26,352 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7285): Opening region: {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:20:26,352 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:26,352 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:20:26,352 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7327): checking encryption for e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:26,352 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(7330): checking classloading for e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:26,354 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:26,355 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:26,355 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e39fb8694c3bacc24718f77e81d6379f columnFamilyName A 2024-12-08T11:20:26,355 DEBUG [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:26,356 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(327): Store=e39fb8694c3bacc24718f77e81d6379f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:26,356 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:26,357 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:26,358 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e39fb8694c3bacc24718f77e81d6379f columnFamilyName B 2024-12-08T11:20:26,358 DEBUG [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:26,358 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(327): Store=e39fb8694c3bacc24718f77e81d6379f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:26,358 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:26,359 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:26,359 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e39fb8694c3bacc24718f77e81d6379f columnFamilyName C 2024-12-08T11:20:26,359 DEBUG [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:26,360 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(327): Store=e39fb8694c3bacc24718f77e81d6379f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:26,360 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:26,361 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:26,361 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:26,362 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T11:20:26,364 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1085): writing seq id for e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:26,366 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T11:20:26,366 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1102): Opened e39fb8694c3bacc24718f77e81d6379f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61831195, jitterRate=-0.07864339649677277}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:20:26,367 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegion(1001): Region open journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:26,367 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., pid=43, masterSystemTime=1733656826348 2024-12-08T11:20:26,369 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:26,369 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=43}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
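When region e39fb8694c3bacc24718f77e81d6379f opens, every store is instantiated as a CompactingMemStore with compactor=ADAPTIVE and a 2.00 MB in-memory flush threshold, i.e. the table-level metadata has been translated into per-store in-memory compaction. The same policy can also be requested per column family through the descriptor API; the snippet below shows that alternative as an illustration and is not what this test does:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionSketch {
  public static void main(String[] args) {
    // Per-family equivalent of the ADAPTIVE policy that the log shows being
    // applied via the 'hbase.hregion.compacting.memstore.type' table metadata.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
    System.out.println(cf.getInMemoryCompaction()); // ADAPTIVE
  }
}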
2024-12-08T11:20:26,369 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=e39fb8694c3bacc24718f77e81d6379f, regionState=OPEN, openSeqNum=2, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:26,372 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=42 2024-12-08T11:20:26,372 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=42, state=SUCCESS; OpenRegionProcedure e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 in 174 msec 2024-12-08T11:20:26,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-08T11:20:26,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, ASSIGN in 330 msec 2024-12-08T11:20:26,374 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T11:20:26,374 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656826374"}]},"ts":"1733656826374"} 2024-12-08T11:20:26,375 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T11:20:26,378 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T11:20:26,379 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1650 sec 2024-12-08T11:20:27,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-08T11:20:27,322 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-08T11:20:27,324 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4e560c7b to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ddf4c3 2024-12-08T11:20:27,327 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ff872d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:27,329 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:27,331 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57706, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:27,333 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T11:20:27,334 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52930, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T11:20:27,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-08T11:20:27,339 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:20:27,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=44, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:27,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741967_1143 (size=999) 2024-12-08T11:20:27,759 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-08T11:20:27,759 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-08T11:20:27,763 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T11:20:27,774 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, REOPEN/MOVE}] 2024-12-08T11:20:27,775 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, REOPEN/MOVE 2024-12-08T11:20:27,775 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=e39fb8694c3bacc24718f77e81d6379f, regionState=CLOSING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:27,777 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T11:20:27,777 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; CloseRegionProcedure e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:20:27,928 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:27,929 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(124): Close e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:27,929 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T11:20:27,929 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1681): Closing e39fb8694c3bacc24718f77e81d6379f, disabling compactions & flushes 2024-12-08T11:20:27,929 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:27,929 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:27,929 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. after waiting 0 ms 2024-12-08T11:20:27,929 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:27,933 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-08T11:20:27,934 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:27,934 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegion(1635): Region close journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:27,934 WARN [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] regionserver.HRegionServer(3786): Not adding moved region record: e39fb8694c3bacc24718f77e81d6379f to self. 2024-12-08T11:20:27,936 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=47}] handler.UnassignRegionHandler(170): Closed e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:27,936 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=e39fb8694c3bacc24718f77e81d6379f, regionState=CLOSED 2024-12-08T11:20:27,938 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-12-08T11:20:27,938 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; CloseRegionProcedure e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 in 160 msec 2024-12-08T11:20:27,939 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, REOPEN/MOVE; state=CLOSED, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=true 2024-12-08T11:20:28,089 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=e39fb8694c3bacc24718f77e81d6379f, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,091 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=46, state=RUNNABLE; OpenRegionProcedure e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:20:28,243 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,246 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:28,246 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7285): Opening region: {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:20:28,246 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,246 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:20:28,246 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7327): checking encryption for e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,247 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7330): checking classloading for e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,251 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,252 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:28,257 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e39fb8694c3bacc24718f77e81d6379f columnFamilyName A 2024-12-08T11:20:28,259 DEBUG [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:28,259 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(327): Store=e39fb8694c3bacc24718f77e81d6379f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:28,260 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,261 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:28,261 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e39fb8694c3bacc24718f77e81d6379f columnFamilyName B 2024-12-08T11:20:28,261 DEBUG [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:28,261 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(327): Store=e39fb8694c3bacc24718f77e81d6379f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:28,261 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,262 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:28,262 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region e39fb8694c3bacc24718f77e81d6379f columnFamilyName C 2024-12-08T11:20:28,262 DEBUG [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:28,263 INFO [StoreOpener-e39fb8694c3bacc24718f77e81d6379f-1 {}] regionserver.HStore(327): Store=e39fb8694c3bacc24718f77e81d6379f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:28,263 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,263 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,264 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,266 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T11:20:28,267 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1085): writing seq id for e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,268 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1102): Opened e39fb8694c3bacc24718f77e81d6379f; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66730982, jitterRate=-0.00563088059425354}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:20:28,269 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1001): Region open journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:28,270 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., pid=48, masterSystemTime=1733656828242 2024-12-08T11:20:28,272 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,272 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:28,272 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=46 updating hbase:meta row=e39fb8694c3bacc24718f77e81d6379f, regionState=OPEN, openSeqNum=5, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,274 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=46 2024-12-08T11:20:28,275 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; OpenRegionProcedure e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 in 182 msec 2024-12-08T11:20:28,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-08T11:20:28,276 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, REOPEN/MOVE in 501 msec 2024-12-08T11:20:28,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-12-08T11:20:28,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 515 msec 2024-12-08T11:20:28,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 939 msec 2024-12-08T11:20:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=44 2024-12-08T11:20:28,290 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c826820 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29458edd 2024-12-08T11:20:28,297 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cae6c5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:28,298 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2931c73e to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c7d6279 2024-12-08T11:20:28,301 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c820ef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:28,302 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x176c5c1b to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328f994d 2024-12-08T11:20:28,305 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3a4420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:28,306 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x190853fc to 
127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a9306be 2024-12-08T11:20:28,310 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42e904d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:28,311 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46114993 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@769942d9 2024-12-08T11:20:28,314 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:28,315 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x367f47f7 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2885d2d9 2024-12-08T11:20:28,317 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb464a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:28,318 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-12-08T11:20:28,321 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:28,322 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-12-08T11:20:28,326 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:28,327 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-12-08T11:20:28,331 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:28,335 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:28,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-08T11:20:28,337 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:28,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T11:20:28,337 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:28,337 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:28,338 DEBUG [hconnection-0x79bfd5bf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:28,339 DEBUG [hconnection-0xa5c643a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:28,339 DEBUG [hconnection-0x694bb74e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:28,339 DEBUG [hconnection-0x6b4524be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:28,340 DEBUG [hconnection-0x2ac3e101-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:28,340 DEBUG [hconnection-0x2bccd049-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:28,341 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:28,341 DEBUG [hconnection-0x1adbc80d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:28,341 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57722, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:28,341 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57744, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:28,341 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57758, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:28,342 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57770, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-08T11:20:28,342 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:28,345 DEBUG [hconnection-0x40fddafc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:28,346 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57798, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:28,348 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57814, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:28,352 DEBUG [hconnection-0x4491bf73-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:28,353 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57816, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:28,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,361 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:20:28,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:28,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:28,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:28,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:28,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:28,363 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:28,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656888406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656888409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656888412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656888415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656888416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086c5aeb6e72784fca845a181323f82342_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656828356/Put/seqid=0 2024-12-08T11:20:28,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T11:20:28,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741968_1144 (size=19474) 2024-12-08T11:20:28,454 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:28,463 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086c5aeb6e72784fca845a181323f82342_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086c5aeb6e72784fca845a181323f82342_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:28,465 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6e64c4ca25454ebbaf96fdf922a87091, store: [table=TestAcidGuarantees 
family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:28,475 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6e64c4ca25454ebbaf96fdf922a87091 is 175, key is test_row_0/A:col10/1733656828356/Put/seqid=0 2024-12-08T11:20:28,489 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:28,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:28,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,490 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:28,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:28,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:28,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741969_1145 (size=56733) 2024-12-08T11:20:28,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656888518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656888522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656888525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656888525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656888526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T11:20:28,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:28,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:28,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:28,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:28,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:28,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656888732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656888732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656888733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,734 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656888733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:28,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656888733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,799 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:28,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:28,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,801 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:28,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:28,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:28,914 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6e64c4ca25454ebbaf96fdf922a87091 2024-12-08T11:20:28,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T11:20:28,954 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:28,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:28,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:28,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:28,955 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:28,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:28,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:28,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3bad87a005d34384aff2c70538d23fe3 is 50, key is test_row_0/B:col10/1733656828356/Put/seqid=0 2024-12-08T11:20:28,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741970_1146 (size=12001) 2024-12-08T11:20:28,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3bad87a005d34384aff2c70538d23fe3 2024-12-08T11:20:29,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/2ed4837e751a4518aeacea48c10b7923 is 50, key is test_row_0/C:col10/1733656828356/Put/seqid=0 2024-12-08T11:20:29,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656889036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656889037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656889036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656889038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656889042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741971_1147 (size=12001) 2024-12-08T11:20:29,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,110 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:29,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:29,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,110 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:29,110 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,263 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:29,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:29,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,264 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,416 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:29,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:29,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:29,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,417 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:29,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:29,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T11:20:29,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/2ed4837e751a4518aeacea48c10b7923 2024-12-08T11:20:29,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6e64c4ca25454ebbaf96fdf922a87091 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6e64c4ca25454ebbaf96fdf922a87091 2024-12-08T11:20:29,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6e64c4ca25454ebbaf96fdf922a87091, entries=300, sequenceid=16, filesize=55.4 K 2024-12-08T11:20:29,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3bad87a005d34384aff2c70538d23fe3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3bad87a005d34384aff2c70538d23fe3 2024-12-08T11:20:29,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3bad87a005d34384aff2c70538d23fe3, entries=150, sequenceid=16, filesize=11.7 K 2024-12-08T11:20:29,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/2ed4837e751a4518aeacea48c10b7923 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2ed4837e751a4518aeacea48c10b7923 2024-12-08T11:20:29,487 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2ed4837e751a4518aeacea48c10b7923, entries=150, sequenceid=16, filesize=11.7 K 2024-12-08T11:20:29,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for e39fb8694c3bacc24718f77e81d6379f in 1127ms, sequenceid=16, compaction requested=false 2024-12-08T11:20:29,489 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-08T11:20:29,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:29,557 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:29,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:20:29,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:29,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:29,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:29,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:29,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:29,559 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:29,570 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:29,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:29,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:29,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120898f163944c86405ea34959d84f837299_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656828392/Put/seqid=0 2024-12-08T11:20:29,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656889569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656889586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656889586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656889589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656889593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741972_1148 (size=12154) 2024-12-08T11:20:29,648 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:29,656 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120898f163944c86405ea34959d84f837299_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120898f163944c86405ea34959d84f837299_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:29,658 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/7363d722660b4d7897cbb6a7b21bd6b9, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:29,659 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/7363d722660b4d7897cbb6a7b21bd6b9 is 175, key is test_row_0/A:col10/1733656828392/Put/seqid=0 2024-12-08T11:20:29,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741973_1149 (size=30955) 2024-12-08T11:20:29,689 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/7363d722660b4d7897cbb6a7b21bd6b9 2024-12-08T11:20:29,701 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3056591f8d634817bcaebb645c654e55 is 50, key is 
test_row_0/B:col10/1733656828392/Put/seqid=0 2024-12-08T11:20:29,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656889700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656889706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656889707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656889707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656889707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,723 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:29,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:29,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,725 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:29,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741974_1150 (size=12001) 2024-12-08T11:20:29,877 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:29,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:29,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:29,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:29,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656889903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656889914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656889915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656889916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:29,918 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656889916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,031 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:30,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:30,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:30,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:30,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:30,143 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T11:20:30,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3056591f8d634817bcaebb645c654e55 2024-12-08T11:20:30,171 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/0031b815701048d3af7f758a99e35768 is 50, key is test_row_0/C:col10/1733656828392/Put/seqid=0 2024-12-08T11:20:30,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741975_1151 (size=12001) 2024-12-08T11:20:30,184 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/0031b815701048d3af7f758a99e35768 2024-12-08T11:20:30,185 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,186 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:30,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:30,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,186 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:30,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:30,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:30,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/7363d722660b4d7897cbb6a7b21bd6b9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/7363d722660b4d7897cbb6a7b21bd6b9 2024-12-08T11:20:30,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/7363d722660b4d7897cbb6a7b21bd6b9, entries=150, sequenceid=42, filesize=30.2 K 2024-12-08T11:20:30,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3056591f8d634817bcaebb645c654e55 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3056591f8d634817bcaebb645c654e55 2024-12-08T11:20:30,204 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3056591f8d634817bcaebb645c654e55, entries=150, sequenceid=42, filesize=11.7 K 2024-12-08T11:20:30,206 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/0031b815701048d3af7f758a99e35768 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0031b815701048d3af7f758a99e35768 2024-12-08T11:20:30,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0031b815701048d3af7f758a99e35768, entries=150, sequenceid=42, filesize=11.7 K 2024-12-08T11:20:30,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for e39fb8694c3bacc24718f77e81d6379f in 656ms, sequenceid=42, compaction requested=false 2024-12-08T11:20:30,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:30,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:30,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:20:30,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:30,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:30,221 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:30,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:30,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:30,222 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:30,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f42792ab630f459e8c1c45f0f31d0eea_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656829590/Put/seqid=0 2024-12-08T11:20:30,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741976_1152 (size=14594) 2024-12-08T11:20:30,256 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:30,266 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f42792ab630f459e8c1c45f0f31d0eea_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f42792ab630f459e8c1c45f0f31d0eea_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:30,267 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/0082d0b25f3a43f4bef02f4b2999c278, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:30,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/0082d0b25f3a43f4bef02f4b2999c278 is 175, key is test_row_0/A:col10/1733656829590/Put/seqid=0 2024-12-08T11:20:30,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741977_1153 (size=39549) 2024-12-08T11:20:30,283 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/0082d0b25f3a43f4bef02f4b2999c278 2024-12-08T11:20:30,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656890277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656890279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656890280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656890281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656890282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/7f62da32e92c4891bd3b7a18582abe24 is 50, key is test_row_0/B:col10/1733656829590/Put/seqid=0 2024-12-08T11:20:30,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741978_1154 (size=12001) 2024-12-08T11:20:30,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/7f62da32e92c4891bd3b7a18582abe24 2024-12-08T11:20:30,338 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:30,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:30,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,339 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:30,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:30,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:30,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/5fa64daf04a145dd8785629745968a87 is 50, key is test_row_0/C:col10/1733656829590/Put/seqid=0 2024-12-08T11:20:30,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741979_1155 (size=12001) 2024-12-08T11:20:30,380 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/5fa64daf04a145dd8785629745968a87 2024-12-08T11:20:30,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/0082d0b25f3a43f4bef02f4b2999c278 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/0082d0b25f3a43f4bef02f4b2999c278 2024-12-08T11:20:30,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656890387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,390 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656890389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656890389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656890389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656890390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,397 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/0082d0b25f3a43f4bef02f4b2999c278, entries=200, sequenceid=53, filesize=38.6 K 2024-12-08T11:20:30,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/7f62da32e92c4891bd3b7a18582abe24 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7f62da32e92c4891bd3b7a18582abe24 2024-12-08T11:20:30,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7f62da32e92c4891bd3b7a18582abe24, entries=150, sequenceid=53, filesize=11.7 K 2024-12-08T11:20:30,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/5fa64daf04a145dd8785629745968a87 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5fa64daf04a145dd8785629745968a87 2024-12-08T11:20:30,418 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5fa64daf04a145dd8785629745968a87, entries=150, sequenceid=53, filesize=11.7 K 2024-12-08T11:20:30,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for e39fb8694c3bacc24718f77e81d6379f in 198ms, sequenceid=53, compaction requested=true 2024-12-08T11:20:30,419 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:30,420 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:30,420 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:30,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:30,420 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:30,421 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:30,421 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/B is initiating minor compaction (all files) 2024-12-08T11:20:30,421 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/B in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,422 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3bad87a005d34384aff2c70538d23fe3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3056591f8d634817bcaebb645c654e55, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7f62da32e92c4891bd3b7a18582abe24] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=35.2 K 2024-12-08T11:20:30,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:30,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:30,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:30,422 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:30,422 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127237 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:30,422 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/A is initiating minor compaction (all files) 2024-12-08T11:20:30,422 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting 
compaction of e39fb8694c3bacc24718f77e81d6379f/A in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,423 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6e64c4ca25454ebbaf96fdf922a87091, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/7363d722660b4d7897cbb6a7b21bd6b9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/0082d0b25f3a43f4bef02f4b2999c278] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=124.3 K 2024-12-08T11:20:30,423 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,423 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6e64c4ca25454ebbaf96fdf922a87091, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/7363d722660b4d7897cbb6a7b21bd6b9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/0082d0b25f3a43f4bef02f4b2999c278] 2024-12-08T11:20:30,424 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bad87a005d34384aff2c70538d23fe3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733656828356 2024-12-08T11:20:30,424 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3056591f8d634817bcaebb645c654e55, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733656828392 2024-12-08T11:20:30,424 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e64c4ca25454ebbaf96fdf922a87091, keycount=300, bloomtype=ROW, size=55.4 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733656828350 2024-12-08T11:20:30,425 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f62da32e92c4891bd3b7a18582abe24, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656829581 2024-12-08T11:20:30,425 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7363d722660b4d7897cbb6a7b21bd6b9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733656828392 2024-12-08T11:20:30,425 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 0082d0b25f3a43f4bef02f4b2999c278, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656829581 2024-12-08T11:20:30,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T11:20:30,451 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#B#compaction#135 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:30,451 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/2177e16ef18c491f90b90dd625cea51c is 50, key is test_row_0/B:col10/1733656829590/Put/seqid=0 2024-12-08T11:20:30,461 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:30,466 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208de97098cd7374897a9fdeeac55eeb0f5_e39fb8694c3bacc24718f77e81d6379f store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:30,473 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208de97098cd7374897a9fdeeac55eeb0f5_e39fb8694c3bacc24718f77e81d6379f, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:30,474 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208de97098cd7374897a9fdeeac55eeb0f5_e39fb8694c3bacc24718f77e81d6379f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:30,491 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,492 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-08T11:20:30,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:30,492 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:20:30,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:30,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:30,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:30,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:30,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:30,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:30,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741980_1156 (size=12104) 2024-12-08T11:20:30,504 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/2177e16ef18c491f90b90dd625cea51c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/2177e16ef18c491f90b90dd625cea51c 2024-12-08T11:20:30,512 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/B of e39fb8694c3bacc24718f77e81d6379f into 2177e16ef18c491f90b90dd625cea51c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:30,513 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:30,513 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/B, priority=13, startTime=1733656830420; duration=0sec 2024-12-08T11:20:30,513 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:30,513 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:B 2024-12-08T11:20:30,513 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:30,517 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:30,517 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/C is initiating minor compaction (all files) 2024-12-08T11:20:30,517 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/C in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:30,517 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2ed4837e751a4518aeacea48c10b7923, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0031b815701048d3af7f758a99e35768, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5fa64daf04a145dd8785629745968a87] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=35.2 K 2024-12-08T11:20:30,518 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ed4837e751a4518aeacea48c10b7923, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733656828356 2024-12-08T11:20:30,518 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0031b815701048d3af7f758a99e35768, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1733656828392 2024-12-08T11:20:30,519 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fa64daf04a145dd8785629745968a87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656829581 2024-12-08T11:20:30,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42827 is added to blk_1073741981_1157 (size=4469) 2024-12-08T11:20:30,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f5e43491b2bb4b19919eb74e895955e3_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656830281/Put/seqid=0 2024-12-08T11:20:30,526 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#A#compaction#136 average throughput is 0.38 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:30,528 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/f7dd4fa912224f449b6b91633e515433 is 175, key is test_row_0/A:col10/1733656829590/Put/seqid=0 2024-12-08T11:20:30,552 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#C#compaction#138 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:30,552 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/cb6e929a991d48cab4fa8094647316a9 is 50, key is test_row_0/C:col10/1733656829590/Put/seqid=0 2024-12-08T11:20:30,561 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-08T11:20:30,564 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52932, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-08T11:20:30,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741982_1158 (size=31058) 2024-12-08T11:20:30,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:30,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
as already flushing 2024-12-08T11:20:30,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741983_1159 (size=12154) 2024-12-08T11:20:30,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:30,609 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f5e43491b2bb4b19919eb74e895955e3_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f5e43491b2bb4b19919eb74e895955e3_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:30,617 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/f7dd4fa912224f449b6b91633e515433 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7dd4fa912224f449b6b91633e515433 2024-12-08T11:20:30,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/214d70abf8f84bd9b15ae1e4cc5c111b, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:30,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/214d70abf8f84bd9b15ae1e4cc5c111b is 175, key is test_row_0/A:col10/1733656830281/Put/seqid=0 2024-12-08T11:20:30,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656890609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656890615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,625 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/A of e39fb8694c3bacc24718f77e81d6379f into f7dd4fa912224f449b6b91633e515433(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:30,625 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:30,625 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/A, priority=13, startTime=1733656830419; duration=0sec 2024-12-08T11:20:30,625 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:30,625 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:A 2024-12-08T11:20:30,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656890619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656890620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656890622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741984_1160 (size=12104) 2024-12-08T11:20:30,641 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/cb6e929a991d48cab4fa8094647316a9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/cb6e929a991d48cab4fa8094647316a9 2024-12-08T11:20:30,649 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/C of e39fb8694c3bacc24718f77e81d6379f into cb6e929a991d48cab4fa8094647316a9(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:30,649 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:30,649 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/C, priority=13, startTime=1733656830422; duration=0sec 2024-12-08T11:20:30,650 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:30,650 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:C 2024-12-08T11:20:30,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741985_1161 (size=30955) 2024-12-08T11:20:30,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656890721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656890722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656890728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656890729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656890730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656890927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656890928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656890931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:30,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656890932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:30,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656890932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,069 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/214d70abf8f84bd9b15ae1e4cc5c111b 2024-12-08T11:20:31,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/755215a241ef4a61bb3f5e028892033d is 50, key is test_row_0/B:col10/1733656830281/Put/seqid=0 2024-12-08T11:20:31,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741986_1162 (size=12001) 2024-12-08T11:20:31,131 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), 
to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/755215a241ef4a61bb3f5e028892033d 2024-12-08T11:20:31,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/6e63bfefe598485eb8835e3a0b1c22f4 is 50, key is test_row_0/C:col10/1733656830281/Put/seqid=0 2024-12-08T11:20:31,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741987_1163 (size=12001) 2024-12-08T11:20:31,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656891237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656891237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656891237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656891237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656891253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,596 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/6e63bfefe598485eb8835e3a0b1c22f4 2024-12-08T11:20:31,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/214d70abf8f84bd9b15ae1e4cc5c111b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/214d70abf8f84bd9b15ae1e4cc5c111b 2024-12-08T11:20:31,609 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/214d70abf8f84bd9b15ae1e4cc5c111b, entries=150, sequenceid=78, filesize=30.2 K 2024-12-08T11:20:31,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/755215a241ef4a61bb3f5e028892033d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/755215a241ef4a61bb3f5e028892033d 2024-12-08T11:20:31,616 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/755215a241ef4a61bb3f5e028892033d, entries=150, sequenceid=78, filesize=11.7 K 2024-12-08T11:20:31,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/6e63bfefe598485eb8835e3a0b1c22f4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/6e63bfefe598485eb8835e3a0b1c22f4 2024-12-08T11:20:31,631 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/6e63bfefe598485eb8835e3a0b1c22f4, entries=150, sequenceid=78, filesize=11.7 K 2024-12-08T11:20:31,640 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for e39fb8694c3bacc24718f77e81d6379f in 1148ms, sequenceid=78, compaction requested=false 2024-12-08T11:20:31,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:31,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:31,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-08T11:20:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-08T11:20:31,644 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-08T11:20:31,644 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3050 sec 2024-12-08T11:20:31,646 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 3.3090 sec 2024-12-08T11:20:31,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T11:20:31,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:31,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:31,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:31,747 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:31,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:31,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:31,748 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:31,779 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412089324cfe12855485ab03908d834895082_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656830617/Put/seqid=0 2024-12-08T11:20:31,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656891782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656891785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656891790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656891792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656891791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741988_1164 (size=12154) 2024-12-08T11:20:31,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656891925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656891925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656891925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656891925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:31,929 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:31,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656891926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,134 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656892130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656892130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656892131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656892131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656892131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,224 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:32,250 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412089324cfe12855485ab03908d834895082_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089324cfe12855485ab03908d834895082_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:32,256 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/eae8e330f9b643799f5bb7125df6994c, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:32,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/eae8e330f9b643799f5bb7125df6994c is 175, key is test_row_0/A:col10/1733656830617/Put/seqid=0 2024-12-08T11:20:32,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741989_1165 (size=30955) 2024-12-08T11:20:32,306 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/eae8e330f9b643799f5bb7125df6994c 2024-12-08T11:20:32,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/5615edfca6bc44ec8e72d4468a89a62c is 50, key is test_row_0/B:col10/1733656830617/Put/seqid=0 2024-12-08T11:20:32,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741990_1166 
(size=12001) 2024-12-08T11:20:32,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/5615edfca6bc44ec8e72d4468a89a62c 2024-12-08T11:20:32,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/a47d443b6ce34b9ea2d6f791f00b2fa0 is 50, key is test_row_0/C:col10/1733656830617/Put/seqid=0 2024-12-08T11:20:32,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656892437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656892438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656892438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741991_1167 (size=12001) 2024-12-08T11:20:32,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-08T11:20:32,444 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-08T11:20:32,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/a47d443b6ce34b9ea2d6f791f00b2fa0 2024-12-08T11:20:32,446 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:32,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-08T11:20:32,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656892444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T11:20:32,448 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:32,449 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:32,449 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:32,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656892444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/eae8e330f9b643799f5bb7125df6994c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/eae8e330f9b643799f5bb7125df6994c 2024-12-08T11:20:32,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/eae8e330f9b643799f5bb7125df6994c, entries=150, sequenceid=94, filesize=30.2 K 2024-12-08T11:20:32,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/5615edfca6bc44ec8e72d4468a89a62c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5615edfca6bc44ec8e72d4468a89a62c 2024-12-08T11:20:32,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5615edfca6bc44ec8e72d4468a89a62c, entries=150, sequenceid=94, filesize=11.7 K 2024-12-08T11:20:32,476 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/a47d443b6ce34b9ea2d6f791f00b2fa0 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a47d443b6ce34b9ea2d6f791f00b2fa0 2024-12-08T11:20:32,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a47d443b6ce34b9ea2d6f791f00b2fa0, entries=150, sequenceid=94, filesize=11.7 K 2024-12-08T11:20:32,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e39fb8694c3bacc24718f77e81d6379f in 737ms, sequenceid=94, compaction requested=true 2024-12-08T11:20:32,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:32,483 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:32,485 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:32,485 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/A is initiating minor compaction (all files) 2024-12-08T11:20:32,485 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/A in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:32,485 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7dd4fa912224f449b6b91633e515433, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/214d70abf8f84bd9b15ae1e4cc5c111b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/eae8e330f9b643799f5bb7125df6994c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=90.8 K 2024-12-08T11:20:32,485 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:32,485 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7dd4fa912224f449b6b91633e515433, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/214d70abf8f84bd9b15ae1e4cc5c111b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/eae8e330f9b643799f5bb7125df6994c] 2024-12-08T11:20:32,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:32,486 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7dd4fa912224f449b6b91633e515433, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656829581 2024-12-08T11:20:32,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:32,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:32,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:32,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:32,486 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T11:20:32,486 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 214d70abf8f84bd9b15ae1e4cc5c111b, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733656830276 2024-12-08T11:20:32,487 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting eae8e330f9b643799f5bb7125df6994c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656830614 2024-12-08T11:20:32,487 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:32,489 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:32,489 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/B is initiating minor compaction (all files) 2024-12-08T11:20:32,489 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/B in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:32,489 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/2177e16ef18c491f90b90dd625cea51c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/755215a241ef4a61bb3f5e028892033d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5615edfca6bc44ec8e72d4468a89a62c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=35.3 K 2024-12-08T11:20:32,490 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2177e16ef18c491f90b90dd625cea51c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656829581 2024-12-08T11:20:32,491 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 755215a241ef4a61bb3f5e028892033d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733656830276 2024-12-08T11:20:32,492 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5615edfca6bc44ec8e72d4468a89a62c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656830614 2024-12-08T11:20:32,501 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:32,510 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#B#compaction#145 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:32,511 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/d73d5fc120424c62aaca220f02b86f95 is 50, key is test_row_0/B:col10/1733656830617/Put/seqid=0 2024-12-08T11:20:32,513 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120863f124453b834f1287069c3625190dd5_e39fb8694c3bacc24718f77e81d6379f store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:32,517 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120863f124453b834f1287069c3625190dd5_e39fb8694c3bacc24718f77e81d6379f, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:32,517 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120863f124453b834f1287069c3625190dd5_e39fb8694c3bacc24718f77e81d6379f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:32,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741992_1168 (size=12207) 2024-12-08T11:20:32,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T11:20:32,551 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/d73d5fc120424c62aaca220f02b86f95 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d73d5fc120424c62aaca220f02b86f95 2024-12-08T11:20:32,557 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/B of e39fb8694c3bacc24718f77e81d6379f into d73d5fc120424c62aaca220f02b86f95(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:32,558 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:32,558 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/B, priority=13, startTime=1733656832486; duration=0sec 2024-12-08T11:20:32,558 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:32,558 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:B 2024-12-08T11:20:32,558 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:32,559 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:32,560 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/C is initiating minor compaction (all files) 2024-12-08T11:20:32,560 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/C in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:32,560 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/cb6e929a991d48cab4fa8094647316a9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/6e63bfefe598485eb8835e3a0b1c22f4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a47d443b6ce34b9ea2d6f791f00b2fa0] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=35.3 K 2024-12-08T11:20:32,560 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting cb6e929a991d48cab4fa8094647316a9, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1733656829581 2024-12-08T11:20:32,561 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e63bfefe598485eb8835e3a0b1c22f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1733656830276 2024-12-08T11:20:32,561 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a47d443b6ce34b9ea2d6f791f00b2fa0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656830614 2024-12-08T11:20:32,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is 
added to blk_1073741993_1169 (size=4469) 2024-12-08T11:20:32,594 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#C#compaction#146 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:32,595 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/5ac7b5b561dd45dd9bb8408549bf997d is 50, key is test_row_0/C:col10/1733656830617/Put/seqid=0 2024-12-08T11:20:32,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-08T11:20:32,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:32,603 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T11:20:32,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:32,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:32,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:32,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:32,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:32,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:32,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741994_1170 (size=12207) 2024-12-08T11:20:32,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082eaadcce464047ccbf21fcf01e86f0c3_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656831790/Put/seqid=0 2024-12-08T11:20:32,628 DEBUG 
[RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/5ac7b5b561dd45dd9bb8408549bf997d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5ac7b5b561dd45dd9bb8408549bf997d 2024-12-08T11:20:32,634 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/C of e39fb8694c3bacc24718f77e81d6379f into 5ac7b5b561dd45dd9bb8408549bf997d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:32,634 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:32,634 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/C, priority=13, startTime=1733656832486; duration=0sec 2024-12-08T11:20:32,635 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:32,635 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:C 2024-12-08T11:20:32,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741995_1171 (size=12154) 2024-12-08T11:20:32,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T11:20:32,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:32,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:32,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656892966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,982 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#A#compaction#144 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:32,983 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/147a859b1b72442ba71a11290139b7f3 is 175, key is test_row_0/A:col10/1733656830617/Put/seqid=0 2024-12-08T11:20:32,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656892977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656892978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656892980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:32,987 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:32,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656892980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741996_1172 (size=31161) 2024-12-08T11:20:33,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:33,048 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082eaadcce464047ccbf21fcf01e86f0c3_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082eaadcce464047ccbf21fcf01e86f0c3_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:33,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/bb542e3b0a9a45f58c95b60dfd147867, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:33,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/bb542e3b0a9a45f58c95b60dfd147867 is 175, key is test_row_0/A:col10/1733656831790/Put/seqid=0 2024-12-08T11:20:33,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T11:20:33,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656893082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656893087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656893090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656893090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656893091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741997_1173 (size=30955) 2024-12-08T11:20:33,107 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/bb542e3b0a9a45f58c95b60dfd147867 2024-12-08T11:20:33,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/363c66006107457abac3c0ad1b29e0a3 is 50, key is test_row_0/B:col10/1733656831790/Put/seqid=0 2024-12-08T11:20:33,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741998_1174 (size=12001) 2024-12-08T11:20:33,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656893295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656893296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656893297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656893297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656893309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,432 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/147a859b1b72442ba71a11290139b7f3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/147a859b1b72442ba71a11290139b7f3 2024-12-08T11:20:33,446 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/A of e39fb8694c3bacc24718f77e81d6379f into 147a859b1b72442ba71a11290139b7f3(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:33,446 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:33,446 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/A, priority=13, startTime=1733656832483; duration=0sec 2024-12-08T11:20:33,446 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:33,446 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:A 2024-12-08T11:20:33,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T11:20:33,571 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/363c66006107457abac3c0ad1b29e0a3 2024-12-08T11:20:33,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/f4d0ce8d2a3e43ae92638ef05b334282 is 50, key is test_row_0/C:col10/1733656831790/Put/seqid=0 2024-12-08T11:20:33,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656893598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656893602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,615 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656893610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656893614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:33,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656893602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:33,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741999_1175 (size=12001) 2024-12-08T11:20:33,632 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/f4d0ce8d2a3e43ae92638ef05b334282 2024-12-08T11:20:33,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/bb542e3b0a9a45f58c95b60dfd147867 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/bb542e3b0a9a45f58c95b60dfd147867 2024-12-08T11:20:33,645 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/bb542e3b0a9a45f58c95b60dfd147867, entries=150, sequenceid=118, filesize=30.2 K 2024-12-08T11:20:33,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/363c66006107457abac3c0ad1b29e0a3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/363c66006107457abac3c0ad1b29e0a3 2024-12-08T11:20:33,651 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/363c66006107457abac3c0ad1b29e0a3, entries=150, sequenceid=118, filesize=11.7 K 2024-12-08T11:20:33,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/f4d0ce8d2a3e43ae92638ef05b334282 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f4d0ce8d2a3e43ae92638ef05b334282 2024-12-08T11:20:33,657 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f4d0ce8d2a3e43ae92638ef05b334282, entries=150, sequenceid=118, filesize=11.7 K 2024-12-08T11:20:33,658 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for e39fb8694c3bacc24718f77e81d6379f in 1056ms, sequenceid=118, compaction requested=false 2024-12-08T11:20:33,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:33,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:33,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-08T11:20:33,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-08T11:20:33,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-08T11:20:33,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2100 sec 2024-12-08T11:20:33,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.2160 sec 2024-12-08T11:20:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:34,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T11:20:34,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:34,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:34,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:34,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:34,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:34,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:34,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086f4a8e5848274b5c88b27e1c5467c10c_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656834110/Put/seqid=0 2024-12-08T11:20:34,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742000_1176 (size=12254) 2024-12-08T11:20:34,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656894130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656894148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656894183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656894183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656894186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656894289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656894290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,292 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656894291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656894291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656894291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656894493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656894494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656894495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656894494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656894495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,556 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:34,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-08T11:20:34,562 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-08T11:20:34,562 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086f4a8e5848274b5c88b27e1c5467c10c_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086f4a8e5848274b5c88b27e1c5467c10c_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:34,564 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/3bb8d22aecc047a4ad9625319223fee7, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:34,564 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/3bb8d22aecc047a4ad9625319223fee7 is 175, key is test_row_0/A:col10/1733656834110/Put/seqid=0 2024-12-08T11:20:34,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:34,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-08T11:20:34,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T11:20:34,568 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:34,569 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:34,569 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:34,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742001_1177 (size=31055) 2024-12-08T11:20:34,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T11:20:34,736 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T11:20:34,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:34,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:34,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:34,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:34,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:34,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:34,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656894798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656894799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656894799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656894800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:34,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656894800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T11:20:34,890 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:34,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T11:20:34,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:34,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:34,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:34,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:34,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:34,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,011 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=136, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/3bb8d22aecc047a4ad9625319223fee7 2024-12-08T11:20:35,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/d92d90b2825c471ab0f767a2080cee40 is 50, key is test_row_0/B:col10/1733656834110/Put/seqid=0 2024-12-08T11:20:35,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742002_1178 (size=12101) 2024-12-08T11:20:35,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/d92d90b2825c471ab0f767a2080cee40 2024-12-08T11:20:35,046 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:35,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T11:20:35,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:35,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:35,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:35,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,048 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/8c641353bc92468a9067a8f8ec63eb74 is 50, key is test_row_0/C:col10/1733656834110/Put/seqid=0 2024-12-08T11:20:35,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742003_1179 (size=12101) 2024-12-08T11:20:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T11:20:35,200 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:35,200 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T11:20:35,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:35,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:35,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
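Editor's note: the records above show a flush procedure (pid=54) being dispatched to the region server, rejected with "NOT flushing ... as already flushing", reported back as "Unable to complete flush", and then re-dispatched by the master until the in-progress flush finishes. The sketch below shows the same retry-until-it-sticks pattern from the client side using only the public Admin API; the sleep interval and attempt cap are illustrative assumptions, not values taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import java.io.IOException;

public class FlushWithRetry {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees"); // table from the log above
      int attempts = 0;
      while (true) {
        try {
          // Admin.flush submits a flush request much like the procedure seen above.
          admin.flush(table);
          break;
        } catch (IOException e) {
          // Whether a concurrent flush surfaces here depends on the HBase version; we
          // simply retry with a linear backoff (both the cap and delay are assumptions).
          if (++attempts >= 10) {
            throw e;
          }
          Thread.sleep(200L * attempts);
        }
      }
    }
  }
}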
2024-12-08T11:20:35,201 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,236 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-08T11:20:35,236 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-08T11:20:35,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:35,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656895302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:35,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:35,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656895307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:35,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:35,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:35,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656895309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:35,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656895309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:35,317 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:35,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656895311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:35,353 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:35,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T11:20:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:35,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:35,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/8c641353bc92468a9067a8f8ec63eb74 2024-12-08T11:20:35,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/3bb8d22aecc047a4ad9625319223fee7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/3bb8d22aecc047a4ad9625319223fee7 2024-12-08T11:20:35,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/3bb8d22aecc047a4ad9625319223fee7, entries=150, sequenceid=136, filesize=30.3 K 2024-12-08T11:20:35,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/d92d90b2825c471ab0f767a2080cee40 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d92d90b2825c471ab0f767a2080cee40 2024-12-08T11:20:35,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d92d90b2825c471ab0f767a2080cee40, entries=150, 
sequenceid=136, filesize=11.8 K 2024-12-08T11:20:35,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/8c641353bc92468a9067a8f8ec63eb74 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/8c641353bc92468a9067a8f8ec63eb74 2024-12-08T11:20:35,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/8c641353bc92468a9067a8f8ec63eb74, entries=150, sequenceid=136, filesize=11.8 K 2024-12-08T11:20:35,499 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for e39fb8694c3bacc24718f77e81d6379f in 1389ms, sequenceid=136, compaction requested=true 2024-12-08T11:20:35,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:35,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:35,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:35,499 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:35,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:35,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:35,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:35,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T11:20:35,499 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:35,501 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:35,501 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/B is initiating minor compaction (all files) 2024-12-08T11:20:35,501 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/B in 
TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:35,501 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d73d5fc120424c62aaca220f02b86f95, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/363c66006107457abac3c0ad1b29e0a3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d92d90b2825c471ab0f767a2080cee40] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=35.5 K 2024-12-08T11:20:35,501 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93171 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:35,501 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/A is initiating minor compaction (all files) 2024-12-08T11:20:35,501 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/A in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:35,502 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/147a859b1b72442ba71a11290139b7f3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/bb542e3b0a9a45f58c95b60dfd147867, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/3bb8d22aecc047a4ad9625319223fee7] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=91.0 K 2024-12-08T11:20:35,502 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:35,502 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/147a859b1b72442ba71a11290139b7f3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/bb542e3b0a9a45f58c95b60dfd147867, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/3bb8d22aecc047a4ad9625319223fee7] 2024-12-08T11:20:35,503 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting d73d5fc120424c62aaca220f02b86f95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656830614 2024-12-08T11:20:35,503 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 147a859b1b72442ba71a11290139b7f3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656830614 2024-12-08T11:20:35,503 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb542e3b0a9a45f58c95b60dfd147867, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733656831782 2024-12-08T11:20:35,503 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 363c66006107457abac3c0ad1b29e0a3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733656831782 2024-12-08T11:20:35,504 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bb8d22aecc047a4ad9625319223fee7, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656832963 2024-12-08T11:20:35,504 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting d92d90b2825c471ab0f767a2080cee40, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656832963 2024-12-08T11:20:35,507 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:35,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-08T11:20:35,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
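Editor's note: the RegionTooBusyException warnings earlier ("Over memstore limit=512.0 K") are the region blocking writes once its memstore passes the blocking threshold, which is derived from the flush size and the block multiplier. The snippet below is a minimal sketch of those two standard settings; the numeric values are assumptions chosen only because their product matches the 512 KB limit reported above, and are not read from the test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Blocking threshold = flush size * block multiplier. 128 KB * 4 = 512 KB,
    // matching the "Over memstore limit=512.0 K" messages in this log (illustrative values).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getLong("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("Writes block once a region's memstore exceeds " + blockingLimit + " bytes");
  }
}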
2024-12-08T11:20:35,508 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:20:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:35,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:35,516 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#B#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:35,517 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/f79b0ca04b6a4946ab8db05d1c4f6a88 is 50, key is test_row_0/B:col10/1733656834110/Put/seqid=0 2024-12-08T11:20:35,524 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:35,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c046602954a745b1ad975fb46099a42b_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656834129/Put/seqid=0 2024-12-08T11:20:35,530 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412088620f48570fe4a87ba1ab6470da5708e_e39fb8694c3bacc24718f77e81d6379f store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:35,533 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412088620f48570fe4a87ba1ab6470da5708e_e39fb8694c3bacc24718f77e81d6379f, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:35,533 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412088620f48570fe4a87ba1ab6470da5708e_e39fb8694c3bacc24718f77e81d6379f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:35,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742004_1180 (size=12409) 2024-12-08T11:20:35,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742006_1182 (size=4469) 2024-12-08T11:20:35,568 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#A#compaction#154 average throughput is 0.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:35,569 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/e9d629c2faad4eb5a15d055335643f79 is 175, key is test_row_0/A:col10/1733656834110/Put/seqid=0 2024-12-08T11:20:35,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742005_1181 (size=12304) 2024-12-08T11:20:35,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:35,590 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c046602954a745b1ad975fb46099a42b_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c046602954a745b1ad975fb46099a42b_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:35,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/d33d63d9b738421fb4690b4a47d84e9c, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:35,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/d33d63d9b738421fb4690b4a47d84e9c is 175, key is test_row_0/A:col10/1733656834129/Put/seqid=0 2024-12-08T11:20:35,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742007_1183 (size=31363) 2024-12-08T11:20:35,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742008_1184 (size=31105) 2024-12-08T11:20:35,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T11:20:35,954 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/f79b0ca04b6a4946ab8db05d1c4f6a88 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f79b0ca04b6a4946ab8db05d1c4f6a88 2024-12-08T11:20:35,961 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/B of e39fb8694c3bacc24718f77e81d6379f into f79b0ca04b6a4946ab8db05d1c4f6a88(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:35,961 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:35,961 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/B, priority=13, startTime=1733656835499; duration=0sec 2024-12-08T11:20:35,962 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:35,962 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:B 2024-12-08T11:20:35,962 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:35,963 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:35,963 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/C is initiating minor compaction (all files) 2024-12-08T11:20:35,963 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/C in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
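Editor's note: the "Exploring compaction algorithm has selected 3 files of size 36309" lines come from HBase's exploring compaction policy. The sketch below is a deliberately simplified illustration of the underlying size-ratio test (a file is only compacted with its peers if it is no larger than ratio times the sum of the others); it is not the real policy implementation, the ratio of 1.2 is the usual default assumed here, and the individual file sizes are illustrative values chosen to sum to the 36309 bytes reported above.

import java.util.List;

public class RatioSelectionSketch {
  // Simplified size-ratio check mirroring the idea behind the exploring policy:
  // every candidate file must be <= ratio * (sum of the other candidates).
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative sizes (~12 K each) summing to 36309, like the B-family selection above.
    List<Long> candidate = List.of(12_185L, 12_023L, 12_101L);
    System.out.println("in ratio: " + filesInRatio(candidate, 1.2));
  }
}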
2024-12-08T11:20:35,964 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5ac7b5b561dd45dd9bb8408549bf997d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f4d0ce8d2a3e43ae92638ef05b334282, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/8c641353bc92468a9067a8f8ec63eb74] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=35.5 K 2024-12-08T11:20:35,964 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ac7b5b561dd45dd9bb8408549bf997d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656830614 2024-12-08T11:20:35,965 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f4d0ce8d2a3e43ae92638ef05b334282, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1733656831782 2024-12-08T11:20:35,965 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c641353bc92468a9067a8f8ec63eb74, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656832963 2024-12-08T11:20:35,974 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#C#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:35,975 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/e71372bab9944bcdb0fee65e45de724f is 50, key is test_row_0/C:col10/1733656834110/Put/seqid=0 2024-12-08T11:20:35,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742009_1185 (size=12409) 2024-12-08T11:20:35,992 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/e71372bab9944bcdb0fee65e45de724f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/e71372bab9944bcdb0fee65e45de724f 2024-12-08T11:20:36,002 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/C of e39fb8694c3bacc24718f77e81d6379f into e71372bab9944bcdb0fee65e45de724f(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:36,002 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:36,002 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/C, priority=13, startTime=1733656835499; duration=0sec 2024-12-08T11:20:36,003 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:36,003 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:C 2024-12-08T11:20:36,009 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/e9d629c2faad4eb5a15d055335643f79 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e9d629c2faad4eb5a15d055335643f79 2024-12-08T11:20:36,013 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/d33d63d9b738421fb4690b4a47d84e9c 2024-12-08T11:20:36,017 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/A of e39fb8694c3bacc24718f77e81d6379f into e9d629c2faad4eb5a15d055335643f79(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
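Editor's note: the RegionTooBusyException warnings above (which resume below) are the server pushing back on puts while the memstore is over its blocking limit; the HBase client normally retries these internally, but a caller doing its own error handling can back off explicitly. A minimal sketch follows; the row, family, qualifier, and value mirror names seen in this log, while the backoff parameters are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0")) // row key seen in the log
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100; // illustrative starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          // Server is over its memstore blocking limit; wait for the flush to drain it.
          // Depending on client retry settings this may instead surface wrapped in a
          // RetriesExhaustedException. Cap and backoff growth are assumptions.
          if (attempt >= 8) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}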
2024-12-08T11:20:36,017 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:36,017 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/A, priority=13, startTime=1733656835499; duration=0sec 2024-12-08T11:20:36,018 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:36,018 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:A 2024-12-08T11:20:36,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/848aee9193a54fe7aea62c3587e0bc9a is 50, key is test_row_0/B:col10/1733656834129/Put/seqid=0 2024-12-08T11:20:36,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742010_1186 (size=12151) 2024-12-08T11:20:36,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:36,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:36,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656896370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656896370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656896370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656896372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656896372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,435 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/848aee9193a54fe7aea62c3587e0bc9a 2024-12-08T11:20:36,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/2660455af2434fb7866202b144ab9cf0 is 50, key is test_row_0/C:col10/1733656834129/Put/seqid=0 2024-12-08T11:20:36,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742011_1187 (size=12151) 2024-12-08T11:20:36,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656896478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656896479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656896479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656896479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656896479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T11:20:36,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656896682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656896682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656896682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:36,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656896682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656896683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:36,853 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/2660455af2434fb7866202b144ab9cf0 2024-12-08T11:20:36,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/d33d63d9b738421fb4690b4a47d84e9c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d33d63d9b738421fb4690b4a47d84e9c 2024-12-08T11:20:36,875 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d33d63d9b738421fb4690b4a47d84e9c, entries=150, sequenceid=156, filesize=30.4 K 2024-12-08T11:20:36,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/848aee9193a54fe7aea62c3587e0bc9a as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/848aee9193a54fe7aea62c3587e0bc9a 2024-12-08T11:20:36,882 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/848aee9193a54fe7aea62c3587e0bc9a, entries=150, sequenceid=156, filesize=11.9 K 2024-12-08T11:20:36,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/2660455af2434fb7866202b144ab9cf0 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2660455af2434fb7866202b144ab9cf0 2024-12-08T11:20:36,890 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2660455af2434fb7866202b144ab9cf0, entries=150, sequenceid=156, filesize=11.9 K 2024-12-08T11:20:36,892 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for e39fb8694c3bacc24718f77e81d6379f in 1383ms, sequenceid=156, compaction requested=false 2024-12-08T11:20:36,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:36,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
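
The repeated RegionTooBusyException warnings above come from HRegion.checkResources() rejecting new mutations once the region's memstore exceeds its blocking limit (512.0 K in this run); the flush that just completed (~114 KB at sequenceid=156) is what ultimately lets the blocked writers proceed. Below is a minimal Java sketch of the pieces involved, assuming illustrative configuration values and a hypothetical putWithBackoff helper — it is not the test's actual settings or writer code, and whether the exception reaches the caller directly or wrapped depends on the client's own retry configuration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriteSketch {

  // Hypothetical helper: retry a single put with exponential backoff when the
  // region reports it is over its memstore blocking limit.
  static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long backoffMs = 100L;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e;
        }
        Thread.sleep(backoffMs);  // wait for the in-flight flush to drain the memstore
        backoffMs *= 2;
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server-side knobs behind the "Over memstore limit" check; in a unit test they
    // would be applied to the mini-cluster's Configuration before startup. The values
    // here are illustrative: flush size x block multiplier = 128 K x 4 = 512 K,
    // consistent with the limit reported in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5);
    }
  }
}

Backoff rather than immediate retry is the point of the sketch: the region stays blocked until the flush seen above (which took roughly 1.4 s) brings the memstore back under the limit, so tight retry loops would only produce more of the rejections recorded in this log.
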
2024-12-08T11:20:36,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-08T11:20:36,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-08T11:20:36,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-08T11:20:36,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3250 sec 2024-12-08T11:20:36,899 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.3310 sec 2024-12-08T11:20:37,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:37,023 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:20:37,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:37,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:37,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:37,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:37,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:37,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:37,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c2335dc8508b4f76902f4c4187bd6c0e_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656837023/Put/seqid=0 2024-12-08T11:20:37,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742012_1188 (size=14794) 2024-12-08T11:20:37,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656897035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656897036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656897038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656897040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656897040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656897143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,145 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656897143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656897144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656897144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656897144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656897346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656897347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656897348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656897348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656897348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,441 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:37,446 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c2335dc8508b4f76902f4c4187bd6c0e_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c2335dc8508b4f76902f4c4187bd6c0e_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:37,447 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/58c4e3c2b3214f54b9947947b6a8df6b, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:37,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/58c4e3c2b3214f54b9947947b6a8df6b is 175, key is test_row_0/A:col10/1733656837023/Put/seqid=0 2024-12-08T11:20:37,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742013_1189 (size=39749) 2024-12-08T11:20:37,455 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=180, 
memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/58c4e3c2b3214f54b9947947b6a8df6b 2024-12-08T11:20:37,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/b81555196ccb42cf859372bb7b0fe0a4 is 50, key is test_row_0/B:col10/1733656837023/Put/seqid=0 2024-12-08T11:20:37,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742014_1190 (size=12151) 2024-12-08T11:20:37,469 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/b81555196ccb42cf859372bb7b0fe0a4 2024-12-08T11:20:37,483 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/7beef68a7e464024946e88864158e6db is 50, key is test_row_0/C:col10/1733656837023/Put/seqid=0 2024-12-08T11:20:37,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742015_1191 (size=12151) 2024-12-08T11:20:37,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656897650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656897650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656897650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656897651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:37,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656897651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:37,897 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/7beef68a7e464024946e88864158e6db 2024-12-08T11:20:37,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/58c4e3c2b3214f54b9947947b6a8df6b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/58c4e3c2b3214f54b9947947b6a8df6b 2024-12-08T11:20:37,912 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/58c4e3c2b3214f54b9947947b6a8df6b, entries=200, sequenceid=180, filesize=38.8 K 2024-12-08T11:20:37,913 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/b81555196ccb42cf859372bb7b0fe0a4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b81555196ccb42cf859372bb7b0fe0a4 2024-12-08T11:20:37,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b81555196ccb42cf859372bb7b0fe0a4, entries=150, sequenceid=180, filesize=11.9 K 2024-12-08T11:20:37,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/7beef68a7e464024946e88864158e6db as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/7beef68a7e464024946e88864158e6db 2024-12-08T11:20:37,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/7beef68a7e464024946e88864158e6db, entries=150, sequenceid=180, filesize=11.9 K 2024-12-08T11:20:37,925 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e39fb8694c3bacc24718f77e81d6379f in 902ms, sequenceid=180, compaction requested=true 2024-12-08T11:20:37,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:37,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:37,926 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:37,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:37,926 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:37,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:37,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:37,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:37,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:37,928 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102217 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:37,928 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/A is initiating minor 
compaction (all files)
2024-12-08T11:20:37,928 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/A in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.
2024-12-08T11:20:37,928 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e9d629c2faad4eb5a15d055335643f79, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d33d63d9b738421fb4690b4a47d84e9c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/58c4e3c2b3214f54b9947947b6a8df6b] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=99.8 K
2024-12-08T11:20:37,928 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.
2024-12-08T11:20:37,929 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-12-08T11:20:37,929 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/B is initiating minor compaction (all files)
2024-12-08T11:20:37,929 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/B in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.
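
The burst of RegionTooBusyException entries earlier in this stretch comes from HRegion.checkResources(), which rejects new mutations while the region's memstore is above its blocking limit (the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the defaults are 128 MB and 4, so the 512.0 K limit reported here suggests the test deliberately shrinks the flush size to exercise this path). The exception is retryable, so callers normally see it as extra latency unless the retry budget runs out. A minimal client-side sketch, assuming the TestAcidGuarantees table and family A from this log and using purely illustrative retry settings:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyWriteExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values: bound how long the client keeps retrying retryable
        // failures such as RegionTooBusyException before surfacing an error.
        conf.setInt("hbase.client.retries.number", 5);
        conf.setLong("hbase.client.pause", 100); // base pause between attempts, in ms
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          try {
            table.put(put); // retried internally while the region reports it is too busy
          } catch (IOException e) {
            // Once retries are exhausted the failure (often wrapping
            // RegionTooBusyException) reaches the caller.
            System.err.println("Write failed after retries: " + e.getMessage());
          }
        }
      }
    }

The flush that finishes just above ("Finished flush of dataSize ~120.76 KB ... compaction requested=true") is what eventually brings the memstore back under the limit and lets the blocked writers proceed.
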
2024-12-08T11:20:37,929 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f79b0ca04b6a4946ab8db05d1c4f6a88, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/848aee9193a54fe7aea62c3587e0bc9a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b81555196ccb42cf859372bb7b0fe0a4] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=35.9 K 2024-12-08T11:20:37,929 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f79b0ca04b6a4946ab8db05d1c4f6a88, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656832963 2024-12-08T11:20:37,930 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 848aee9193a54fe7aea62c3587e0bc9a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733656834126 2024-12-08T11:20:37,932 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b81555196ccb42cf859372bb7b0fe0a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733656836370 2024-12-08T11:20:37,929 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e9d629c2faad4eb5a15d055335643f79, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d33d63d9b738421fb4690b4a47d84e9c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/58c4e3c2b3214f54b9947947b6a8df6b] 2024-12-08T11:20:37,935 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting e9d629c2faad4eb5a15d055335643f79, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656832963 2024-12-08T11:20:37,936 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting d33d63d9b738421fb4690b4a47d84e9c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733656834126 2024-12-08T11:20:37,936 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58c4e3c2b3214f54b9947947b6a8df6b, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733656836370 2024-12-08T11:20:37,942 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#B#compaction#162 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second
2024-12-08T11:20:37,943 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/dc37d8162b2e45e9ba3461ce6bb30661 is 50, key is test_row_0/B:col10/1733656837023/Put/seqid=0
2024-12-08T11:20:37,963 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f]
2024-12-08T11:20:37,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742016_1192 (size=12561)
2024-12-08T11:20:37,980 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208cb6cc82a909d4ffdbde0137483345cee_e39fb8694c3bacc24718f77e81d6379f store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f]
2024-12-08T11:20:37,983 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208cb6cc82a909d4ffdbde0137483345cee_e39fb8694c3bacc24718f77e81d6379f, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f]
2024-12-08T11:20:37,983 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208cb6cc82a909d4ffdbde0137483345cee_e39fb8694c3bacc24718f77e81d6379f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f]
2024-12-08T11:20:37,986 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/dc37d8162b2e45e9ba3461ce6bb30661 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/dc37d8162b2e45e9ba3461ce6bb30661
2024-12-08T11:20:37,994 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/B of e39fb8694c3bacc24718f77e81d6379f into dc37d8162b2e45e9ba3461ce6bb30661(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
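
The mob.DefaultMobStoreFlusher and mob.DefaultMobStoreCompactor entries indicate that column family A is MOB-enabled: oversized cells are written to separate files under the mobdir tree, and the compactor above opens a MOB writer and then aborts it because the selected store files contain no MOB cells. A sketch of how such a layout can be declared with the HBase 2.x admin API, using illustrative names and an illustrative threshold (the actual test wiring is not shown in this log):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableDescriptorBuilder table =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
          // Family A keeps values above the threshold in MOB files under mobdir,
          // which is where the d41d8cd9..._<region> files in this log live.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100 * 1024) // illustrative threshold: 100 KB
              .build());
          // Families B and C are plain stores in this log.
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
          table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
          admin.createTable(table.build());
        }
      }
    }

Families B and C, by contrast, appear only under the regular data directory and are flushed by the plain DefaultStoreFlusher.
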
2024-12-08T11:20:37,994 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:37,994 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/B, priority=13, startTime=1733656837926; duration=0sec 2024-12-08T11:20:37,994 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:37,994 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:B 2024-12-08T11:20:37,994 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:37,996 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:37,996 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/C is initiating minor compaction (all files) 2024-12-08T11:20:37,996 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/C in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:37,996 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/e71372bab9944bcdb0fee65e45de724f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2660455af2434fb7866202b144ab9cf0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/7beef68a7e464024946e88864158e6db] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=35.9 K 2024-12-08T11:20:37,997 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting e71372bab9944bcdb0fee65e45de724f, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656832963 2024-12-08T11:20:37,998 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2660455af2434fb7866202b144ab9cf0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1733656834126 2024-12-08T11:20:37,998 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7beef68a7e464024946e88864158e6db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733656836370 2024-12-08T11:20:38,014 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
e39fb8694c3bacc24718f77e81d6379f#C#compaction#164 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-12-08T11:20:38,015 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/817d89d220564147aeb6278d79f00240 is 50, key is test_row_0/C:col10/1733656837023/Put/seqid=0
2024-12-08T11:20:38,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742018_1194 (size=12561)
2024-12-08T11:20:38,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742017_1193 (size=4469)
2024-12-08T11:20:38,037 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/817d89d220564147aeb6278d79f00240 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/817d89d220564147aeb6278d79f00240
2024-12-08T11:20:38,038 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#A#compaction#163 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-08T11:20:38,039 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/c96d462c2f89415a8fd778a2c5c28f55 is 175, key is test_row_0/A:col10/1733656837023/Put/seqid=0
2024-12-08T11:20:38,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742019_1195 (size=31515)
2024-12-08T11:20:38,050 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/C of e39fb8694c3bacc24718f77e81d6379f into 817d89d220564147aeb6278d79f00240(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
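
Two groups of settings drive the compaction cycle above. Selection is done by ExploringCompactionPolicy: a minor compaction is considered once at least hbase.hstore.compaction.min eligible files exist (default 3, which is why the three freshly flushed files per store are picked up immediately), and the "16 blocking" figure is hbase.hstore.blockingStoreFiles at its default of 16. The compactor's write rate is capped by the pressure-aware throughput controller; the reported "total limit is 50.00 MB/second" matches that controller's lower throughput bound (50 MB/s by default) when there is no compaction pressure. A sketch of the relevant knobs, shown with default-like values for illustration only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningExample {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Selection: when a minor compaction becomes eligible, and the store-file
        // count at which further flushes are blocked.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Throughput: the pressure-aware controller scales compaction writes between
        // these bounds; with no pressure it sits at the lower bound, matching the
        // "total limit is 50.00 MB/second" lines above.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }

Raising hbase.hstore.compaction.min trades more store files (and slower reads) for fewer compaction runs; the throughput bounds mostly matter when compactions compete with foreground I/O, which is not the case in this small test.
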
2024-12-08T11:20:38,051 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:38,051 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/C, priority=13, startTime=1733656837926; duration=0sec 2024-12-08T11:20:38,051 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:38,051 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:C 2024-12-08T11:20:38,060 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/c96d462c2f89415a8fd778a2c5c28f55 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/c96d462c2f89415a8fd778a2c5c28f55 2024-12-08T11:20:38,067 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/A of e39fb8694c3bacc24718f77e81d6379f into c96d462c2f89415a8fd778a2c5c28f55(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:38,068 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:38,068 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/A, priority=13, startTime=1733656837926; duration=0sec 2024-12-08T11:20:38,068 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:38,068 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:A 2024-12-08T11:20:38,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:38,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T11:20:38,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:38,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:38,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:38,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-12-08T11:20:38,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:38,155 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:38,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084f4123cae11f4d16975e2057438b338c_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656838153/Put/seqid=0 2024-12-08T11:20:38,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742020_1196 (size=14794) 2024-12-08T11:20:38,170 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:38,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656898169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656898171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656898171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,175 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412084f4123cae11f4d16975e2057438b338c_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084f4123cae11f4d16975e2057438b338c_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:38,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656898176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,177 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/36f0a5abc1094ed48a16d6d87fba4d53, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:38,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656898176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/36f0a5abc1094ed48a16d6d87fba4d53 is 175, key is test_row_0/A:col10/1733656838153/Put/seqid=0 2024-12-08T11:20:38,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742021_1197 (size=39749) 2024-12-08T11:20:38,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656898277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656898277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656898277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656898278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656898279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,480 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656898479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656898479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656898481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656898482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,488 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656898488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,611 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/36f0a5abc1094ed48a16d6d87fba4d53 2024-12-08T11:20:38,622 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3d574b1d2fbe4961b658eabfbd56c065 is 50, key is test_row_0/B:col10/1733656838153/Put/seqid=0 2024-12-08T11:20:38,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742022_1198 (size=12151) 2024-12-08T11:20:38,644 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3d574b1d2fbe4961b658eabfbd56c065 2024-12-08T11:20:38,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/ead76508fb4d4ddaae8b57c2bc74306c is 50, key is test_row_0/C:col10/1733656838153/Put/seqid=0 2024-12-08T11:20:38,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742023_1199 (size=12151) 2024-12-08T11:20:38,671 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/ead76508fb4d4ddaae8b57c2bc74306c 2024-12-08T11:20:38,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-08T11:20:38,674 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-08T11:20:38,677 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:38,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-08T11:20:38,680 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:38,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/36f0a5abc1094ed48a16d6d87fba4d53 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/36f0a5abc1094ed48a16d6d87fba4d53 2024-12-08T11:20:38,681 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:38,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:38,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-08T11:20:38,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/36f0a5abc1094ed48a16d6d87fba4d53, entries=200, sequenceid=199, filesize=38.8 K 2024-12-08T11:20:38,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3d574b1d2fbe4961b658eabfbd56c065 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3d574b1d2fbe4961b658eabfbd56c065 2024-12-08T11:20:38,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3d574b1d2fbe4961b658eabfbd56c065, entries=150, sequenceid=199, filesize=11.9 K 2024-12-08T11:20:38,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/ead76508fb4d4ddaae8b57c2bc74306c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/ead76508fb4d4ddaae8b57c2bc74306c 2024-12-08T11:20:38,706 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/ead76508fb4d4ddaae8b57c2bc74306c, entries=150, sequenceid=199, filesize=11.9 K 2024-12-08T11:20:38,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for e39fb8694c3bacc24718f77e81d6379f in 553ms, sequenceid=199, compaction requested=false 2024-12-08T11:20:38,707 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:38,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-08T11:20:38,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:38,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T11:20:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:38,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:38,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208112b53ad5ee143bf806a77cc13d0e4f2_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656838172/Put/seqid=0 2024-12-08T11:20:38,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656898796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656898797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656898797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656898800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656898801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T11:20:38,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:38,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:38,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:38,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
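[Editor's note - not part of the captured log] The run of RegionTooBusyException rejections above all cite the same blocking threshold, "Over memstore limit=512.0 K". In HBase that limit is normally the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so a 512 K ceiling suggests this test runs with a deliberately tiny flush size. The Java sketch below only illustrates that relationship; the concrete values are assumptions, not settings recovered from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-style settings: 128 K flush size x multiplier 4 = 512 K blocking limit.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // 524288 bytes (512 K) with the assumed values

    System.out.println("Mutations are rejected with RegionTooBusyException once the region's"
        + " memstore exceeds " + blockingLimit + " bytes, until a flush completes.");
  }
}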
2024-12-08T11:20:38,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:38,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:38,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742024_1200 (size=12304) 2024-12-08T11:20:38,849 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:38,854 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208112b53ad5ee143bf806a77cc13d0e4f2_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208112b53ad5ee143bf806a77cc13d0e4f2_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:38,856 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/e3df298d85994093bd6cc1441fdd8163, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:38,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/e3df298d85994093bd6cc1441fdd8163 is 175, key is test_row_0/A:col10/1733656838172/Put/seqid=0 2024-12-08T11:20:38,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742025_1201 (size=31105) 2024-12-08T11:20:38,893 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=222, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/e3df298d85994093bd6cc1441fdd8163 2024-12-08T11:20:38,902 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656898902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656898902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656898904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,907 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/a789cea8836c499bbfec46d4b0fdb557 is 50, key is test_row_0/B:col10/1733656838172/Put/seqid=0 2024-12-08T11:20:38,910 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656898906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,911 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:38,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656898908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742026_1202 (size=12151) 2024-12-08T11:20:38,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-08T11:20:38,988 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:38,989 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T11:20:38,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:38,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:38,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:38,989 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:38,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:38,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656899104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656899105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656899109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656899113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656899113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,141 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,141 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T11:20:39,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:39,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:39,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-08T11:20:39,294 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,294 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T11:20:39,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:39,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:39,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/a789cea8836c499bbfec46d4b0fdb557 2024-12-08T11:20:39,340 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/f3d2a8a5c5da4a3d8bf9ceb384219ff4 is 50, key is test_row_0/C:col10/1733656838172/Put/seqid=0 2024-12-08T11:20:39,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742027_1203 (size=12151) 2024-12-08T11:20:39,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656899408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,410 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656899409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656899411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656899415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,420 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656899417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,447 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T11:20:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:39,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,600 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T11:20:39,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:39,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:39,750 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/f3d2a8a5c5da4a3d8bf9ceb384219ff4 2024-12-08T11:20:39,753 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T11:20:39,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:39,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,754 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:39,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:39,757 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/e3df298d85994093bd6cc1441fdd8163 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e3df298d85994093bd6cc1441fdd8163 2024-12-08T11:20:39,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e3df298d85994093bd6cc1441fdd8163, entries=150, sequenceid=222, filesize=30.4 K 2024-12-08T11:20:39,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/a789cea8836c499bbfec46d4b0fdb557 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/a789cea8836c499bbfec46d4b0fdb557 2024-12-08T11:20:39,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/a789cea8836c499bbfec46d4b0fdb557, entries=150, sequenceid=222, filesize=11.9 K 2024-12-08T11:20:39,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/f3d2a8a5c5da4a3d8bf9ceb384219ff4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f3d2a8a5c5da4a3d8bf9ceb384219ff4 2024-12-08T11:20:39,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f3d2a8a5c5da4a3d8bf9ceb384219ff4, entries=150, sequenceid=222, filesize=11.9 K 2024-12-08T11:20:39,775 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for e39fb8694c3bacc24718f77e81d6379f in 991ms, sequenceid=222, compaction requested=true 2024-12-08T11:20:39,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:39,776 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact 
mark for store e39fb8694c3bacc24718f77e81d6379f:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:39,776 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:39,776 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:39,777 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:39,777 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/A is initiating minor compaction (all files) 2024-12-08T11:20:39,777 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/A in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,777 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/c96d462c2f89415a8fd778a2c5c28f55, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/36f0a5abc1094ed48a16d6d87fba4d53, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e3df298d85994093bd6cc1441fdd8163] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=100.0 K 2024-12-08T11:20:39,778 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,778 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/c96d462c2f89415a8fd778a2c5c28f55, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/36f0a5abc1094ed48a16d6d87fba4d53, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e3df298d85994093bd6cc1441fdd8163] 2024-12-08T11:20:39,778 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:39,778 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/B is initiating minor compaction (all files) 2024-12-08T11:20:39,778 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/B in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,778 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/dc37d8162b2e45e9ba3461ce6bb30661, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3d574b1d2fbe4961b658eabfbd56c065, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/a789cea8836c499bbfec46d4b0fdb557] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=36.0 K 2024-12-08T11:20:39,779 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting c96d462c2f89415a8fd778a2c5c28f55, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733656836370 2024-12-08T11:20:39,779 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting dc37d8162b2e45e9ba3461ce6bb30661, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733656836370 2024-12-08T11:20:39,779 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36f0a5abc1094ed48a16d6d87fba4d53, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733656837029 2024-12-08T11:20:39,779 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d574b1d2fbe4961b658eabfbd56c065, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733656837029 2024-12-08T11:20:39,779 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3df298d85994093bd6cc1441fdd8163, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1733656838164 2024-12-08T11:20:39,779 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 
a789cea8836c499bbfec46d4b0fdb557, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1733656838164 2024-12-08T11:20:39,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-08T11:20:39,788 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:39,790 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#B#compaction#172 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:39,791 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/7dc8fcfc8f45463cba249e35cec7130f is 50, key is test_row_0/B:col10/1733656838172/Put/seqid=0 2024-12-08T11:20:39,791 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208afd21af068314bd292b5888ad50b9fcc_e39fb8694c3bacc24718f77e81d6379f store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:39,793 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208afd21af068314bd292b5888ad50b9fcc_e39fb8694c3bacc24718f77e81d6379f, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:39,793 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208afd21af068314bd292b5888ad50b9fcc_e39fb8694c3bacc24718f77e81d6379f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:39,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742029_1205 (size=4469) 2024-12-08T11:20:39,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742028_1204 (size=12663) 2024-12-08T11:20:39,811 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/7dc8fcfc8f45463cba249e35cec7130f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7dc8fcfc8f45463cba249e35cec7130f 2024-12-08T11:20:39,816 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/B of e39fb8694c3bacc24718f77e81d6379f into 
7dc8fcfc8f45463cba249e35cec7130f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:39,816 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:39,817 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/B, priority=13, startTime=1733656839776; duration=0sec 2024-12-08T11:20:39,817 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:39,817 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:B 2024-12-08T11:20:39,817 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:39,819 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:39,819 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/C is initiating minor compaction (all files) 2024-12-08T11:20:39,819 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/C in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:39,819 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/817d89d220564147aeb6278d79f00240, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/ead76508fb4d4ddaae8b57c2bc74306c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f3d2a8a5c5da4a3d8bf9ceb384219ff4] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=36.0 K 2024-12-08T11:20:39,819 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 817d89d220564147aeb6278d79f00240, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1733656836370 2024-12-08T11:20:39,820 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ead76508fb4d4ddaae8b57c2bc74306c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1733656837029 2024-12-08T11:20:39,820 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f3d2a8a5c5da4a3d8bf9ceb384219ff4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1733656838164 2024-12-08T11:20:39,832 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#C#compaction#173 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:39,833 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/180943d730024c9e9ccb3501b94c5e9f is 50, key is test_row_0/C:col10/1733656838172/Put/seqid=0 2024-12-08T11:20:39,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742030_1206 (size=12663) 2024-12-08T11:20:39,851 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/180943d730024c9e9ccb3501b94c5e9f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/180943d730024c9e9ccb3501b94c5e9f 2024-12-08T11:20:39,858 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/C of e39fb8694c3bacc24718f77e81d6379f into 180943d730024c9e9ccb3501b94c5e9f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:39,858 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:39,858 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/C, priority=13, startTime=1733656839776; duration=0sec 2024-12-08T11:20:39,858 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:39,858 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:C 2024-12-08T11:20:39,908 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-08T11:20:39,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:39,910 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T11:20:39,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:39,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:39,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:39,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:39,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:39,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:39,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:39,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
as already flushing 2024-12-08T11:20:39,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120813ed2350e4574c9ab50184aa4e42c3ec_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656838794/Put/seqid=0 2024-12-08T11:20:39,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742031_1207 (size=12304) 2024-12-08T11:20:39,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:39,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656899934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656899934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656899934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656899935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:39,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656899938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:39,943 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120813ed2350e4574c9ab50184aa4e42c3ec_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120813ed2350e4574c9ab50184aa4e42c3ec_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:39,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/b774b4e5dbcc442a82b2ed1fb583a7e8, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:39,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/b774b4e5dbcc442a82b2ed1fb583a7e8 is 175, key is test_row_0/A:col10/1733656838794/Put/seqid=0 2024-12-08T11:20:39,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742032_1208 (size=31105) 2024-12-08T11:20:39,957 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/b774b4e5dbcc442a82b2ed1fb583a7e8 2024-12-08T11:20:39,969 INFO [master/355ef6e50110:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-08T11:20:39,969 INFO [master/355ef6e50110:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-12-08T11:20:39,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/494b18249d8d4c208122ff962e969416 is 50, key is test_row_0/B:col10/1733656838794/Put/seqid=0 2024-12-08T11:20:39,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742033_1209 (size=12151) 2024-12-08T11:20:39,999 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/494b18249d8d4c208122ff962e969416 2024-12-08T11:20:40,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/88916ab748fa48ad86cee75730a95ddf is 50, key is test_row_0/C:col10/1733656838794/Put/seqid=0 2024-12-08T11:20:40,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742034_1210 (size=12151) 2024-12-08T11:20:40,027 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/88916ab748fa48ad86cee75730a95ddf 2024-12-08T11:20:40,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656900039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656900040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656900041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656900041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656900041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/b774b4e5dbcc442a82b2ed1fb583a7e8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b774b4e5dbcc442a82b2ed1fb583a7e8 2024-12-08T11:20:40,051 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b774b4e5dbcc442a82b2ed1fb583a7e8, entries=150, sequenceid=238, filesize=30.4 K 2024-12-08T11:20:40,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/494b18249d8d4c208122ff962e969416 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/494b18249d8d4c208122ff962e969416 2024-12-08T11:20:40,057 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/494b18249d8d4c208122ff962e969416, 
entries=150, sequenceid=238, filesize=11.9 K 2024-12-08T11:20:40,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/88916ab748fa48ad86cee75730a95ddf as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/88916ab748fa48ad86cee75730a95ddf 2024-12-08T11:20:40,063 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/88916ab748fa48ad86cee75730a95ddf, entries=150, sequenceid=238, filesize=11.9 K 2024-12-08T11:20:40,065 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for e39fb8694c3bacc24718f77e81d6379f in 155ms, sequenceid=238, compaction requested=false 2024-12-08T11:20:40,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:40,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:40,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-08T11:20:40,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-08T11:20:40,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-08T11:20:40,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3860 sec 2024-12-08T11:20:40,070 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.3920 sec 2024-12-08T11:20:40,205 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#A#compaction#171 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:40,206 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6aabc2080a194c5c933fed145d34732b is 175, key is test_row_0/A:col10/1733656838172/Put/seqid=0 2024-12-08T11:20:40,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742035_1211 (size=31617) 2024-12-08T11:20:40,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:40,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T11:20:40,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:40,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:40,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:40,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:40,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:40,247 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:40,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a010488537404511a2fa8c235d25059f_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656840245/Put/seqid=0 2024-12-08T11:20:40,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656900253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656900254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656900254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656900255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656900255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742036_1212 (size=14944) 2024-12-08T11:20:40,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656900357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656900360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656900360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656900360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656900360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656900559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656900561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,563 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656900562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656900562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656900564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,618 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6aabc2080a194c5c933fed145d34732b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6aabc2080a194c5c933fed145d34732b 2024-12-08T11:20:40,624 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/A of e39fb8694c3bacc24718f77e81d6379f into 6aabc2080a194c5c933fed145d34732b(size=30.9 K), total size for store is 61.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:40,625 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:40,625 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/A, priority=13, startTime=1733656839776; duration=0sec 2024-12-08T11:20:40,625 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:40,625 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:A 2024-12-08T11:20:40,662 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:40,668 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a010488537404511a2fa8c235d25059f_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a010488537404511a2fa8c235d25059f_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:40,669 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/1338df401abe4f109b73e8e2dbf2f428, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:40,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/1338df401abe4f109b73e8e2dbf2f428 is 175, key is test_row_0/A:col10/1733656840245/Put/seqid=0 2024-12-08T11:20:40,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742037_1213 (size=39899) 2024-12-08T11:20:40,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-08T11:20:40,787 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-08T11:20:40,788 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:40,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-08T11:20:40,790 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:40,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T11:20:40,790 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:40,791 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:40,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656900860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,866 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656900864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656900866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656900867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,872 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656900870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T11:20:40,942 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:40,943 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T11:20:40,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:40,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:40,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:40,943 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:40,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:40,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,077 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=262, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/1338df401abe4f109b73e8e2dbf2f428 2024-12-08T11:20:41,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/1e3b4b59e794488b85274b6be45d286c is 50, key is test_row_0/B:col10/1733656840245/Put/seqid=0 2024-12-08T11:20:41,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T11:20:41,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742038_1214 (size=12301) 2024-12-08T11:20:41,098 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,098 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T11:20:41,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:41,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,099 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,099 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,251 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,251 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T11:20:41,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:41,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,252 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:41,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656901367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:41,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656901369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:41,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656901369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:41,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656901372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:41,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656901376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T11:20:41,404 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T11:20:41,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:41,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,405 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:41,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/1e3b4b59e794488b85274b6be45d286c 2024-12-08T11:20:41,502 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/71777066f7ad48d6b0ffd0a02ca51482 is 50, key is test_row_0/C:col10/1733656840245/Put/seqid=0 2024-12-08T11:20:41,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742039_1215 (size=12301) 2024-12-08T11:20:41,557 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T11:20:41,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:41,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:41,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,705 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfc3ee21d1a6433a with lease ID 0x2f13ac210a72d84: from storage DS-00446ebe-3965-4a0f-bd76-838d1c916da5 node DatanodeRegistration(127.0.0.1:42827, datanodeUuid=5d6ecd1b-00f9-49e2-800a-270a73fe2319, infoPort=33571, infoSecurePort=0, ipcPort=33953, storageInfo=lv=-57;cid=testClusterID;nsid=1271048654;c=1733656791850), blocks: 103, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 1 2024-12-08T11:20:41,706 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfc3ee21d1a6433a with lease ID 0x2f13ac210a72d84: from storage DS-ca89a847-938f-49ad-80c5-6dfc8f2041c5 node DatanodeRegistration(127.0.0.1:42827, datanodeUuid=5d6ecd1b-00f9-49e2-800a-270a73fe2319, infoPort=33571, infoSecurePort=0, ipcPort=33953, storageInfo=lv=-57;cid=testClusterID;nsid=1271048654;c=1733656791850), blocks: 106, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 1 2024-12-08T11:20:41,709 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,709 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T11:20:41,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:41,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,710 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,862 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:41,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T11:20:41,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:41,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:41,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:41,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T11:20:41,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=262 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/71777066f7ad48d6b0ffd0a02ca51482 2024-12-08T11:20:41,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/1338df401abe4f109b73e8e2dbf2f428 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/1338df401abe4f109b73e8e2dbf2f428 2024-12-08T11:20:41,921 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/1338df401abe4f109b73e8e2dbf2f428, entries=200, sequenceid=262, filesize=39.0 K 2024-12-08T11:20:41,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/1e3b4b59e794488b85274b6be45d286c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/1e3b4b59e794488b85274b6be45d286c 2024-12-08T11:20:41,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/1e3b4b59e794488b85274b6be45d286c, entries=150, sequenceid=262, filesize=12.0 K 2024-12-08T11:20:41,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/71777066f7ad48d6b0ffd0a02ca51482 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/71777066f7ad48d6b0ffd0a02ca51482 2024-12-08T11:20:41,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/71777066f7ad48d6b0ffd0a02ca51482, entries=150, sequenceid=262, filesize=12.0 K 2024-12-08T11:20:41,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for e39fb8694c3bacc24718f77e81d6379f in 1689ms, sequenceid=262, compaction requested=true 2024-12-08T11:20:41,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:41,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:A, priority=-2147483648, current under compaction store 
size is 1 2024-12-08T11:20:41,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:41,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:41,934 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:41,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:41,935 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:41,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:41,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:41,936 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:41,937 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/B is initiating minor compaction (all files) 2024-12-08T11:20:41,937 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/B in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:41,937 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7dc8fcfc8f45463cba249e35cec7130f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/494b18249d8d4c208122ff962e969416, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/1e3b4b59e794488b85274b6be45d286c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=36.2 K 2024-12-08T11:20:41,937 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102621 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:41,937 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/A is initiating minor compaction (all files) 2024-12-08T11:20:41,937 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/A in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,937 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6aabc2080a194c5c933fed145d34732b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b774b4e5dbcc442a82b2ed1fb583a7e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/1338df401abe4f109b73e8e2dbf2f428] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=100.2 K 2024-12-08T11:20:41,937 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:41,937 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6aabc2080a194c5c933fed145d34732b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b774b4e5dbcc442a82b2ed1fb583a7e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/1338df401abe4f109b73e8e2dbf2f428] 2024-12-08T11:20:41,938 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6aabc2080a194c5c933fed145d34732b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1733656838164 2024-12-08T11:20:41,938 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7dc8fcfc8f45463cba249e35cec7130f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1733656838164 2024-12-08T11:20:41,938 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 494b18249d8d4c208122ff962e969416, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733656838794 2024-12-08T11:20:41,939 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting b774b4e5dbcc442a82b2ed1fb583a7e8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733656838794 2024-12-08T11:20:41,939 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e3b4b59e794488b85274b6be45d286c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733656839936 2024-12-08T11:20:41,939 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1338df401abe4f109b73e8e2dbf2f428, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733656839930 2024-12-08T11:20:41,957 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:41,958 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#B#compaction#180 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:41,959 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3cd8607f74db4312a07130e062f0e4e7 is 50, key is test_row_0/B:col10/1733656840245/Put/seqid=0 2024-12-08T11:20:41,961 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412080cfbd9c55b8a4396bcba0409f31ad3a7_e39fb8694c3bacc24718f77e81d6379f store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:41,963 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412080cfbd9c55b8a4396bcba0409f31ad3a7_e39fb8694c3bacc24718f77e81d6379f, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:41,964 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412080cfbd9c55b8a4396bcba0409f31ad3a7_e39fb8694c3bacc24718f77e81d6379f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:41,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742040_1216 (size=4469) 2024-12-08T11:20:41,978 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#A#compaction#181 average throughput is 1.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:41,979 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/d1c9b0096a864c4cbc3dc36bbdfd8ff2 is 175, key is test_row_0/A:col10/1733656840245/Put/seqid=0 2024-12-08T11:20:42,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742042_1218 (size=31869) 2024-12-08T11:20:42,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742041_1217 (size=12915) 2024-12-08T11:20:42,013 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/3cd8607f74db4312a07130e062f0e4e7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3cd8607f74db4312a07130e062f0e4e7 2024-12-08T11:20:42,015 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-08T11:20:42,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:42,017 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T11:20:42,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:42,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:42,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:42,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:42,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:42,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:42,023 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/B of e39fb8694c3bacc24718f77e81d6379f into 3cd8607f74db4312a07130e062f0e4e7(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:42,023 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:42,023 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/B, priority=13, startTime=1733656841934; duration=0sec 2024-12-08T11:20:42,023 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:42,024 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:B 2024-12-08T11:20:42,024 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:42,026 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37115 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:42,026 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/C is initiating minor compaction (all files) 2024-12-08T11:20:42,026 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/C in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:42,026 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/180943d730024c9e9ccb3501b94c5e9f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/88916ab748fa48ad86cee75730a95ddf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/71777066f7ad48d6b0ffd0a02ca51482] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=36.2 K 2024-12-08T11:20:42,027 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 180943d730024c9e9ccb3501b94c5e9f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1733656838164 2024-12-08T11:20:42,027 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 88916ab748fa48ad86cee75730a95ddf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1733656838794 2024-12-08T11:20:42,028 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 71777066f7ad48d6b0ffd0a02ca51482, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733656839936 2024-12-08T11:20:42,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120836e13d436db9442fa8f6efb49015abf1_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656840248/Put/seqid=0 2024-12-08T11:20:42,038 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#C#compaction#183 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:42,039 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/48e0e40eed504e6eaf46a86f2db41006 is 50, key is test_row_0/C:col10/1733656840245/Put/seqid=0 2024-12-08T11:20:42,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742044_1220 (size=12915) 2024-12-08T11:20:42,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742043_1219 (size=12454) 2024-12-08T11:20:42,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:42,068 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120836e13d436db9442fa8f6efb49015abf1_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120836e13d436db9442fa8f6efb49015abf1_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:42,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/f7f2036787d44e6298ea444e2483b95b, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:42,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/f7f2036787d44e6298ea444e2483b95b is 175, key is test_row_0/A:col10/1733656840248/Put/seqid=0 2024-12-08T11:20:42,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742045_1221 (size=31255) 2024-12-08T11:20:42,077 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=276, memsize=22.4 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/f7f2036787d44e6298ea444e2483b95b 2024-12-08T11:20:42,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/d85ae914356d4f179c6d01b0c418a11f is 50, key is test_row_0/B:col10/1733656840248/Put/seqid=0 2024-12-08T11:20:42,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742046_1222 (size=12301) 2024-12-08T11:20:42,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:42,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:42,405 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/d1c9b0096a864c4cbc3dc36bbdfd8ff2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d1c9b0096a864c4cbc3dc36bbdfd8ff2 2024-12-08T11:20:42,433 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/A of e39fb8694c3bacc24718f77e81d6379f into d1c9b0096a864c4cbc3dc36bbdfd8ff2(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:42,434 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:42,434 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/A, priority=13, startTime=1733656841934; duration=0sec 2024-12-08T11:20:42,434 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:42,434 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:A 2024-12-08T11:20:42,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656902441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656902442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656902442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,444 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656902442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656902443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,458 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/48e0e40eed504e6eaf46a86f2db41006 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/48e0e40eed504e6eaf46a86f2db41006 2024-12-08T11:20:42,463 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/C of e39fb8694c3bacc24718f77e81d6379f into 48e0e40eed504e6eaf46a86f2db41006(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:42,463 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:42,464 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/C, priority=13, startTime=1733656841935; duration=0sec 2024-12-08T11:20:42,464 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:42,464 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:C 2024-12-08T11:20:42,490 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/d85ae914356d4f179c6d01b0c418a11f 2024-12-08T11:20:42,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/32872f1588a444f7bad84cfeeee48005 is 50, key is test_row_0/C:col10/1733656840248/Put/seqid=0 2024-12-08T11:20:42,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742047_1223 (size=12301) 2024-12-08T11:20:42,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656902544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656902545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656902545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656902546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,547 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656902546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656902746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656902748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656902748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656902748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656902749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:42,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T11:20:42,903 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/32872f1588a444f7bad84cfeeee48005 2024-12-08T11:20:42,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/f7f2036787d44e6298ea444e2483b95b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7f2036787d44e6298ea444e2483b95b 2024-12-08T11:20:42,915 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7f2036787d44e6298ea444e2483b95b, entries=150, sequenceid=276, filesize=30.5 K 2024-12-08T11:20:42,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/d85ae914356d4f179c6d01b0c418a11f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d85ae914356d4f179c6d01b0c418a11f 2024-12-08T11:20:42,921 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d85ae914356d4f179c6d01b0c418a11f, entries=150, sequenceid=276, filesize=12.0 K 2024-12-08T11:20:42,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/32872f1588a444f7bad84cfeeee48005 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/32872f1588a444f7bad84cfeeee48005 2024-12-08T11:20:42,930 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/32872f1588a444f7bad84cfeeee48005, entries=150, sequenceid=276, filesize=12.0 K 2024-12-08T11:20:42,931 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for e39fb8694c3bacc24718f77e81d6379f in 914ms, sequenceid=276, compaction requested=false 2024-12-08T11:20:42,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:42,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:42,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-08T11:20:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-08T11:20:42,936 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-08T11:20:42,936 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1420 sec 2024-12-08T11:20:42,938 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.1490 sec 2024-12-08T11:20:43,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:43,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T11:20:43,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:43,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:43,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:43,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:43,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:43,050 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:43,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081737ac8d91a641099368cc80b44c7d0f_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656842441/Put/seqid=0 2024-12-08T11:20:43,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656903058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656903058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656903059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656903063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656903065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742048_1224 (size=12454) 2024-12-08T11:20:43,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656903165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656903166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656903167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656903368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656903368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656903369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,471 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:43,475 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081737ac8d91a641099368cc80b44c7d0f_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081737ac8d91a641099368cc80b44c7d0f_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:43,476 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/49f34a105afd4ae7b38175d8a30dd101, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:43,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/49f34a105afd4ae7b38175d8a30dd101 is 175, key is test_row_0/A:col10/1733656842441/Put/seqid=0 2024-12-08T11:20:43,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742049_1225 (size=31255) 2024-12-08T11:20:43,481 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=303, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/49f34a105afd4ae7b38175d8a30dd101 2024-12-08T11:20:43,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/f4d2107bb3694cdc8836c565aaedce68 is 50, key is test_row_0/B:col10/1733656842441/Put/seqid=0 2024-12-08T11:20:43,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742050_1226 
(size=12301) 2024-12-08T11:20:43,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/f4d2107bb3694cdc8836c565aaedce68 2024-12-08T11:20:43,508 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/72fae324842d44248edc4fdc92a3ab06 is 50, key is test_row_0/C:col10/1733656842441/Put/seqid=0 2024-12-08T11:20:43,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742051_1227 (size=12301) 2024-12-08T11:20:43,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/72fae324842d44248edc4fdc92a3ab06 2024-12-08T11:20:43,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/49f34a105afd4ae7b38175d8a30dd101 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/49f34a105afd4ae7b38175d8a30dd101 2024-12-08T11:20:43,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/49f34a105afd4ae7b38175d8a30dd101, entries=150, sequenceid=303, filesize=30.5 K 2024-12-08T11:20:43,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/f4d2107bb3694cdc8836c565aaedce68 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f4d2107bb3694cdc8836c565aaedce68 2024-12-08T11:20:43,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f4d2107bb3694cdc8836c565aaedce68, entries=150, sequenceid=303, filesize=12.0 K 2024-12-08T11:20:43,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/72fae324842d44248edc4fdc92a3ab06 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/72fae324842d44248edc4fdc92a3ab06 2024-12-08T11:20:43,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/72fae324842d44248edc4fdc92a3ab06, entries=150, sequenceid=303, filesize=12.0 K 2024-12-08T11:20:43,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for e39fb8694c3bacc24718f77e81d6379f in 491ms, sequenceid=303, compaction requested=true 2024-12-08T11:20:43,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:43,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:43,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:43,540 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:43,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:43,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:43,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:43,540 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:43,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:43,541 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:43,541 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/B is initiating minor compaction (all files) 2024-12-08T11:20:43,541 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/B in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
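The RegionTooBusyException entries above, all reporting "Over memstore limit=512.0 K" for region e39fb8694c3bacc24718f77e81d6379f, come from HRegion.checkResources(), which blocks writes once the region's memstore exceeds its blocking limit (roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier). Below is a minimal sketch of a configuration that would yield a 512 K blocking limit; the concrete flush size and multiplier used by this test run are assumptions, since the log does not state them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical test-sized values: 128 K flush size * block multiplier 4 = 512 K blocking limit.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Writes to the region are rejected with RegionTooBusyException while the
        // memstore stays above this size and flushes have not yet caught up.
        System.out.println("Blocking memstore limit (bytes): " + blockingLimit); // 524288 = 512.0 K
    }
}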
2024-12-08T11:20:43,541 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94379 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:43,541 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/A is initiating minor compaction (all files) 2024-12-08T11:20:43,541 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3cd8607f74db4312a07130e062f0e4e7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d85ae914356d4f179c6d01b0c418a11f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f4d2107bb3694cdc8836c565aaedce68] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=36.6 K 2024-12-08T11:20:43,542 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/A in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:43,542 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d1c9b0096a864c4cbc3dc36bbdfd8ff2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7f2036787d44e6298ea444e2483b95b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/49f34a105afd4ae7b38175d8a30dd101] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=92.2 K 2024-12-08T11:20:43,542 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:43,542 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d1c9b0096a864c4cbc3dc36bbdfd8ff2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7f2036787d44e6298ea444e2483b95b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/49f34a105afd4ae7b38175d8a30dd101] 2024-12-08T11:20:43,542 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cd8607f74db4312a07130e062f0e4e7, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733656839936 2024-12-08T11:20:43,543 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting d85ae914356d4f179c6d01b0c418a11f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733656840248 2024-12-08T11:20:43,544 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1c9b0096a864c4cbc3dc36bbdfd8ff2, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733656839936 2024-12-08T11:20:43,545 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f4d2107bb3694cdc8836c565aaedce68, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733656842441 2024-12-08T11:20:43,545 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7f2036787d44e6298ea444e2483b95b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733656840248 2024-12-08T11:20:43,545 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49f34a105afd4ae7b38175d8a30dd101, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733656842441 2024-12-08T11:20:43,553 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:43,554 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#B#compaction#189 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:43,555 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/af5cd8cd98c74773ac452fd0c4e96002 is 50, key is test_row_0/B:col10/1733656842441/Put/seqid=0 2024-12-08T11:20:43,556 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120835c8eefb3db14ce2840d98b85cef0fbd_e39fb8694c3bacc24718f77e81d6379f store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:43,558 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120835c8eefb3db14ce2840d98b85cef0fbd_e39fb8694c3bacc24718f77e81d6379f, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:43,558 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120835c8eefb3db14ce2840d98b85cef0fbd_e39fb8694c3bacc24718f77e81d6379f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:43,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742052_1228 (size=13017) 2024-12-08T11:20:43,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:43,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T11:20:43,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:43,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:43,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:43,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:43,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:43,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:43,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742053_1229 (size=4469) 2024-12-08T11:20:43,578 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087ad28a6a0b7349cc9f258375b66e3ad5_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656843057/Put/seqid=0 2024-12-08T11:20:43,582 INFO 
[RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#A#compaction#190 average throughput is 0.84 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:43,583 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/22418d72c40b48799bf3d91b7a065ed9 is 175, key is test_row_0/A:col10/1733656842441/Put/seqid=0 2024-12-08T11:20:43,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742055_1231 (size=31971) 2024-12-08T11:20:43,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656903607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742054_1230 (size=12454) 2024-12-08T11:20:43,614 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656903611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,619 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/22418d72c40b48799bf3d91b7a065ed9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/22418d72c40b48799bf3d91b7a065ed9 2024-12-08T11:20:43,627 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/A of e39fb8694c3bacc24718f77e81d6379f into 22418d72c40b48799bf3d91b7a065ed9(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
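The rejected "Mutate" RPCs and the flush/compaction activity above are driven by writers updating row test_row_0 across column families A, B and C of TestAcidGuarantees. The sketch below illustrates the kind of Put behind those logged Mutate calls; it is not the test's actual writer code, and the explicit RegionTooBusyException handling is shown only for illustration, since that exception is normally absorbed by the client's internal retry loop.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MutateSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // One row written across the three column families seen in the log (A, B, C).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("valueA"));
            put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("valueB"));
            put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("valueC"));
            try {
                table.put(put); // arrives at the region server as the "Mutate" RPC logged above
            } catch (RegionTooBusyException e) {
                // May surface only when the region's memstore is over its blocking limit and
                // client retries are exhausted or disabled; by default the client retries internally.
            }
        }
    }
}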
2024-12-08T11:20:43,627 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:43,627 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/A, priority=13, startTime=1733656843540; duration=0sec 2024-12-08T11:20:43,627 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:43,627 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:A 2024-12-08T11:20:43,628 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:43,629 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:43,629 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/C is initiating minor compaction (all files) 2024-12-08T11:20:43,629 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/C in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:43,629 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/48e0e40eed504e6eaf46a86f2db41006, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/32872f1588a444f7bad84cfeeee48005, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/72fae324842d44248edc4fdc92a3ab06] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=36.6 K 2024-12-08T11:20:43,629 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48e0e40eed504e6eaf46a86f2db41006, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=262, earliestPutTs=1733656839936 2024-12-08T11:20:43,630 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32872f1588a444f7bad84cfeeee48005, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1733656840248 2024-12-08T11:20:43,630 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72fae324842d44248edc4fdc92a3ab06, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733656842441 2024-12-08T11:20:43,638 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#C#compaction#192 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:43,639 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/a351b8c71561470484babae2df4b7c12 is 50, key is test_row_0/C:col10/1733656842441/Put/seqid=0 2024-12-08T11:20:43,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742056_1232 (size=13017) 2024-12-08T11:20:43,658 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/a351b8c71561470484babae2df4b7c12 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a351b8c71561470484babae2df4b7c12 2024-12-08T11:20:43,666 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/C of e39fb8694c3bacc24718f77e81d6379f into a351b8c71561470484babae2df4b7c12(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:43,666 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:43,666 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/C, priority=13, startTime=1733656843540; duration=0sec 2024-12-08T11:20:43,666 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:43,666 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:C 2024-12-08T11:20:43,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656903669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656903672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656903673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656903712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656903716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,914 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656903914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:43,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656903918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:43,969 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/af5cd8cd98c74773ac452fd0c4e96002 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/af5cd8cd98c74773ac452fd0c4e96002 2024-12-08T11:20:43,974 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/B of e39fb8694c3bacc24718f77e81d6379f into af5cd8cd98c74773ac452fd0c4e96002(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:43,974 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:43,975 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/B, priority=13, startTime=1733656843540; duration=0sec 2024-12-08T11:20:43,975 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:43,975 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:B 2024-12-08T11:20:44,013 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:44,017 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087ad28a6a0b7349cc9f258375b66e3ad5_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087ad28a6a0b7349cc9f258375b66e3ad5_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:44,018 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/8b213f229d3a47e3bb3db83075f611ce, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:44,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/8b213f229d3a47e3bb3db83075f611ce is 175, key is test_row_0/A:col10/1733656843057/Put/seqid=0 2024-12-08T11:20:44,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742057_1233 (size=31255) 2024-12-08T11:20:44,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:44,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656904174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:44,177 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656904176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:44,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:44,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656904179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:44,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:44,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656904217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:44,221 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:44,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656904221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:44,424 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/8b213f229d3a47e3bb3db83075f611ce 2024-12-08T11:20:44,434 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/05bdb5bfaa8b437b93b296b1545e1ddf is 50, key is test_row_0/B:col10/1733656843057/Put/seqid=0 2024-12-08T11:20:44,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742058_1234 (size=12301) 2024-12-08T11:20:44,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:44,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656904719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:44,728 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:44,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656904726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:44,839 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/05bdb5bfaa8b437b93b296b1545e1ddf 2024-12-08T11:20:44,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/59bdeb7d247e4beda49d57c3380114f5 is 50, key is test_row_0/C:col10/1733656843057/Put/seqid=0 2024-12-08T11:20:44,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742059_1235 (size=12301) 2024-12-08T11:20:44,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-08T11:20:44,895 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-08T11:20:44,896 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:44,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-12-08T11:20:44,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T11:20:44,898 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:44,898 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:44,898 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
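The entries just above show a client flush request ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") being turned into FlushTableProcedure pid=59 with a FlushRegionProcedure subprocedure pid=60. The sketch below shows how such a flush is typically requested through the Admin API; the test's actual driver code is not in this log, so the snippet is only an assumption about the calling side. As the subsequent entries show, this particular attempt fails on the region server because the region is already flushing.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; on the master side this
            // becomes a FlushTableProcedure with per-region FlushRegionProcedure children,
            // matching the pid=59 / pid=60 entries around this point in the log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}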
2024-12-08T11:20:44,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T11:20:45,050 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:45,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-08T11:20:45,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:45,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:45,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:45,051 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:45,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:45,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:45,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:45,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656905182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:45,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:45,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656905182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:45,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:45,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656905186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:45,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T11:20:45,204 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:45,204 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-08T11:20:45,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:45,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:45,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:45,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:45,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:45,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:45,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/59bdeb7d247e4beda49d57c3380114f5 2024-12-08T11:20:45,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/8b213f229d3a47e3bb3db83075f611ce as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/8b213f229d3a47e3bb3db83075f611ce 2024-12-08T11:20:45,265 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/8b213f229d3a47e3bb3db83075f611ce, entries=150, sequenceid=318, filesize=30.5 K 2024-12-08T11:20:45,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/05bdb5bfaa8b437b93b296b1545e1ddf as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/05bdb5bfaa8b437b93b296b1545e1ddf 2024-12-08T11:20:45,271 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/05bdb5bfaa8b437b93b296b1545e1ddf, entries=150, sequenceid=318, filesize=12.0 K 2024-12-08T11:20:45,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/59bdeb7d247e4beda49d57c3380114f5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/59bdeb7d247e4beda49d57c3380114f5 2024-12-08T11:20:45,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/59bdeb7d247e4beda49d57c3380114f5, entries=150, sequenceid=318, filesize=12.0 K 2024-12-08T11:20:45,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for e39fb8694c3bacc24718f77e81d6379f in 1709ms, sequenceid=318, compaction requested=false 2024-12-08T11:20:45,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:45,357 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:45,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-12-08T11:20:45,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:45,358 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-08T11:20:45,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:45,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:45,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:45,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:45,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:45,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:45,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120849502b4568da4395a484c23de0ac432a_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656843600/Put/seqid=0 2024-12-08T11:20:45,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742060_1236 (size=12454) 2024-12-08T11:20:45,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:45,380 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed 
file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120849502b4568da4395a484c23de0ac432a_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120849502b4568da4395a484c23de0ac432a_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:45,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/9fad12c37fc7480696506d58853c85a5, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:45,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/9fad12c37fc7480696506d58853c85a5 is 175, key is test_row_0/A:col10/1733656843600/Put/seqid=0 2024-12-08T11:20:45,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742061_1237 (size=31255) 2024-12-08T11:20:45,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T11:20:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:45,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:45,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:45,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656905752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:45,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:45,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656905752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:45,789 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=342, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/9fad12c37fc7480696506d58853c85a5 2024-12-08T11:20:45,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/b2bdfce06b444543b4ade288bcc1286f is 50, key is test_row_0/B:col10/1733656843600/Put/seqid=0 2024-12-08T11:20:45,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742062_1238 (size=12301) 2024-12-08T11:20:45,812 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/b2bdfce06b444543b4ade288bcc1286f 2024-12-08T11:20:45,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/30d4b72205194100b9da7b23ef4e650d is 50, key is test_row_0/C:col10/1733656843600/Put/seqid=0 2024-12-08T11:20:45,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742063_1239 (size=12301) 2024-12-08T11:20:45,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656905855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:45,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:45,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656905857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-08T11:20:46,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:46,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656906060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:46,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:46,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656906059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:46,226 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/30d4b72205194100b9da7b23ef4e650d 2024-12-08T11:20:46,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/9fad12c37fc7480696506d58853c85a5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/9fad12c37fc7480696506d58853c85a5 2024-12-08T11:20:46,236 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/9fad12c37fc7480696506d58853c85a5, entries=150, sequenceid=342, filesize=30.5 K 2024-12-08T11:20:46,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/b2bdfce06b444543b4ade288bcc1286f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b2bdfce06b444543b4ade288bcc1286f 2024-12-08T11:20:46,241 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b2bdfce06b444543b4ade288bcc1286f, entries=150, sequenceid=342, filesize=12.0 K 2024-12-08T11:20:46,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/30d4b72205194100b9da7b23ef4e650d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/30d4b72205194100b9da7b23ef4e650d 2024-12-08T11:20:46,248 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/30d4b72205194100b9da7b23ef4e650d, entries=150, sequenceid=342, filesize=12.0 K 2024-12-08T11:20:46,249 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for e39fb8694c3bacc24718f77e81d6379f in 891ms, sequenceid=342, compaction requested=true 2024-12-08T11:20:46,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:46,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:46,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-12-08T11:20:46,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-12-08T11:20:46,252 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-08T11:20:46,252 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3520 sec 2024-12-08T11:20:46,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.3570 sec 2024-12-08T11:20:46,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:46,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T11:20:46,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:46,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:46,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:46,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:46,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:46,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:46,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120832efe4fdc86946ae9e19a9d6651e50f4_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656845747/Put/seqid=0 2024-12-08T11:20:46,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742064_1240 (size=12454) 2024-12-08T11:20:46,480 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:46,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656906477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:46,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:46,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656906478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:46,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:46,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656906581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:46,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:46,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656906582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:46,791 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:46,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656906785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:46,791 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:46,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656906787, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:46,840 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:46,850 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120832efe4fdc86946ae9e19a9d6651e50f4_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120832efe4fdc86946ae9e19a9d6651e50f4_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:46,851 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/af07cbaae0b141de9ebd6b45218aa455, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:46,852 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/af07cbaae0b141de9ebd6b45218aa455 is 175, key is test_row_0/A:col10/1733656845747/Put/seqid=0 2024-12-08T11:20:46,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742065_1241 (size=31255) 2024-12-08T11:20:46,906 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=359, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/af07cbaae0b141de9ebd6b45218aa455 2024-12-08T11:20:46,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/9f0ae3c94e9345308aa5948fee289d93 is 50, key is test_row_0/B:col10/1733656845747/Put/seqid=0 2024-12-08T11:20:46,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742066_1242 
(size=12301)
2024-12-08T11:20:47,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-12-08T11:20:47,002 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed
2024-12-08T11:20:47,003 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-08T11:20:47,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees
2024-12-08T11:20:47,005 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-08T11:20:47,006 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T11:20:47,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T11:20:47,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61
2024-12-08T11:20:47,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:47,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656907092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:47,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656907093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T11:20:47,158 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-08T11:20:47,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:47,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,159 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
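The RegionTooBusyException entries above are thrown by HRegion.checkResources once the region's memstore passes its blocking threshold (reported here as 512.0 K), which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of that relationship follows; the 128 KB flush size and multiplier of 4 are assumed illustrative values, not settings read from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingThreshold {
  public static void main(String[] args) {
    // Sketch only: a region rejects new writes with RegionTooBusyException once its
    // memstore reaches flush.size * block.multiplier. The values set here are assumed
    // test-style overrides, not values taken from this log.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Under the assumed values this prints 524288 bytes, matching the 512.0 K limit above.
    System.out.println("Blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}

Under those assumptions the blocking limit works out to the 512.0 K reported in the log; the test harness presumably lowers the flush size so this limit is reached quickly.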
2024-12-08T11:20:47,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:47,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:47,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:47,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57770 deadline: 1733656907189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:47,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57720 deadline: 1733656907190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,194 DEBUG [Thread-700 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4129 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:20:47,195 DEBUG [Thread-696 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:20:47,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:47,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57814 deadline: 1733656907203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,205 DEBUG [Thread-698 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:20:47,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T11:20:47,311 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-08T11:20:47,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 
{event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:47,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:47,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:47,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
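The pid=61/pid=62 procedures failing above correspond to an admin-requested flush of TestAcidGuarantees, which the master expands into a FlushTableProcedure with per-region FlushRegionProcedure children and redispatches while the region is still busy flushing. A minimal client-side sketch of issuing such a flush is shown below; the connection setup is assumed (hbase-site.xml on the classpath), and only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml pointing at the target cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Requests a flush of every region of the table; on the master this becomes a
      // FlushTableProcedure with FlushRegionProcedure children, as in the pid=61/pid=62
      // entries above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}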
2024-12-08T11:20:47,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/9f0ae3c94e9345308aa5948fee289d93
2024-12-08T11:20:47,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/a04a0799140443f9ad8f3cd35ccd4264 is 50, key is test_row_0/C:col10/1733656845747/Put/seqid=0
2024-12-08T11:20:47,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742067_1243 (size=12301)
2024-12-08T11:20:47,464 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491
2024-12-08T11:20:47,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62
2024-12-08T11:20:47,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.
2024-12-08T11:20:47,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing
2024-12-08T11:20:47,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.
2024-12-08T11:20:47,465 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62
java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
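The writes being rejected while the flush is in progress are retried by the client, which is what produces the RpcRetryingCallerImpl "Call exception, tries=6, retries=16" entries earlier in this log. A minimal sketch of such a writer follows; the row, column family, and qualifier come from the log, while the retry count and pause values are assumed overrides for illustration, not this run's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumed illustrative overrides of the client retry knobs; the retries=16 shown in
    // the log comes from the test's own configuration, not from these values.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Each put is retried with backoff while the region keeps answering
      // RegionTooBusyException, which is what the "Call exception, tries=..." client
      // entries above are reporting.
      table.put(put);
    }
  }
}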
2024-12-08T11:20:47,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:47,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:47,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:47,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57816 deadline: 1733656907601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:47,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57784 deadline: 1733656907604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T11:20:47,617 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-08T11:20:47,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:47,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
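The RegionTooBusyException entries a few lines above show writes being rejected once the region's memstore passes its blocking limit (512.0 K here). In stock HBase that limit is the per-region flush size times a blocking multiplier; the sketch below uses the stock property names with illustrative values, since this run's actual settings are not visible in the excerpt.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches ~128 KB ...
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        // ... and reject writes (RegionTooBusyException "Over memstore limit")
        // once the memstore reaches flush.size * multiplier, here 512 KB.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }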
2024-12-08T11:20:47,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:47,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:47,771 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,771 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-08T11:20:47,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. as already flushing 2024-12-08T11:20:47,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,772 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:47,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:47,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:47,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/a04a0799140443f9ad8f3cd35ccd4264 2024-12-08T11:20:47,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/af07cbaae0b141de9ebd6b45218aa455 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/af07cbaae0b141de9ebd6b45218aa455 2024-12-08T11:20:47,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/af07cbaae0b141de9ebd6b45218aa455, entries=150, sequenceid=359, filesize=30.5 K 2024-12-08T11:20:47,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/9f0ae3c94e9345308aa5948fee289d93 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/9f0ae3c94e9345308aa5948fee289d93 2024-12-08T11:20:47,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/9f0ae3c94e9345308aa5948fee289d93, entries=150, sequenceid=359, filesize=12.0 K 2024-12-08T11:20:47,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/a04a0799140443f9ad8f3cd35ccd4264 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a04a0799140443f9ad8f3cd35ccd4264 2024-12-08T11:20:47,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a04a0799140443f9ad8f3cd35ccd4264, entries=150, sequenceid=359, filesize=12.0 K 2024-12-08T11:20:47,882 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for e39fb8694c3bacc24718f77e81d6379f in 1487ms, sequenceid=359, compaction requested=true 2024-12-08T11:20:47,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:47,882 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:47,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:47,883 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:47,883 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:47,883 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:47,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:47,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store e39fb8694c3bacc24718f77e81d6379f:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:47,884 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:47,884 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:47,884 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/B is initiating minor compaction (all files) 2024-12-08T11:20:47,884 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/B in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
2024-12-08T11:20:47,884 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/af5cd8cd98c74773ac452fd0c4e96002, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/05bdb5bfaa8b437b93b296b1545e1ddf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b2bdfce06b444543b4ade288bcc1286f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/9f0ae3c94e9345308aa5948fee289d93] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=48.8 K 2024-12-08T11:20:47,885 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125736 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:47,885 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/A is initiating minor compaction (all files) 2024-12-08T11:20:47,885 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting af5cd8cd98c74773ac452fd0c4e96002, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733656842441 2024-12-08T11:20:47,885 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/A in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,885 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/22418d72c40b48799bf3d91b7a065ed9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/8b213f229d3a47e3bb3db83075f611ce, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/9fad12c37fc7480696506d58853c85a5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/af07cbaae0b141de9ebd6b45218aa455] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=122.8 K 2024-12-08T11:20:47,885 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
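The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" lines come from ExploringCompactionPolicy, which only accepts a candidate set if every file is no larger than the combined size of the other files times the compaction ratio (1.2 by default via hbase.hstore.compaction.ratio). A simplified restatement of that check, as a sketch rather than the actual HBase code:

    public class CompactionRatioSketch {
      static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
          total += size;
        }
        for (long size : fileSizes) {
          // every file must be no larger than `ratio` times the rest combined
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly the four B-family files selected above (12.7 K + 3 x 12.0 K).
        long[] sizes = {13000, 12300, 12300, 12300};
        System.out.println(filesInRatio(sizes, 1.2)); // true -> eligible as a set
      }
    }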
2024-12-08T11:20:47,885 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/22418d72c40b48799bf3d91b7a065ed9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/8b213f229d3a47e3bb3db83075f611ce, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/9fad12c37fc7480696506d58853c85a5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/af07cbaae0b141de9ebd6b45218aa455] 2024-12-08T11:20:47,885 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 05bdb5bfaa8b437b93b296b1545e1ddf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733656843057 2024-12-08T11:20:47,886 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 22418d72c40b48799bf3d91b7a065ed9, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733656842441 2024-12-08T11:20:47,886 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b2bdfce06b444543b4ade288bcc1286f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733656843599 2024-12-08T11:20:47,886 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f0ae3c94e9345308aa5948fee289d93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733656845747 2024-12-08T11:20:47,886 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b213f229d3a47e3bb3db83075f611ce, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733656843057 2024-12-08T11:20:47,887 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9fad12c37fc7480696506d58853c85a5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=342, earliestPutTs=1733656843599 2024-12-08T11:20:47,888 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting af07cbaae0b141de9ebd6b45218aa455, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733656845747 2024-12-08T11:20:47,916 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:47,924 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#B#compaction#202 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:47,925 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/a1a3813f3fe94fc2847dbdd37f82e678 is 50, key is test_row_0/B:col10/1733656845747/Put/seqid=0 2024-12-08T11:20:47,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:47,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-12-08T11:20:47,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:47,928 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:20:47,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:47,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:47,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:47,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:47,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:47,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:47,931 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208c2bd3f9409714355a748cdac2f0efe23_e39fb8694c3bacc24718f77e81d6379f store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:47,933 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208c2bd3f9409714355a748cdac2f0efe23_e39fb8694c3bacc24718f77e81d6379f, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:47,933 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c2bd3f9409714355a748cdac2f0efe23_e39fb8694c3bacc24718f77e81d6379f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:47,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d906cda2d3af4b4c932e8efc910c0020_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656846471/Put/seqid=0 2024-12-08T11:20:47,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742068_1244 (size=13153) 2024-12-08T11:20:48,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742069_1245 (size=4469) 2024-12-08T11:20:48,015 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/a1a3813f3fe94fc2847dbdd37f82e678 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/a1a3813f3fe94fc2847dbdd37f82e678 2024-12-08T11:20:48,023 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/B of e39fb8694c3bacc24718f77e81d6379f into a1a3813f3fe94fc2847dbdd37f82e678(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
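The MOB writer created and immediately aborted above ("because there are no MOB cells") is expected: family A appears to be MOB-enabled (DefaultMobStoreCompactor/DefaultMobStoreFlusher and the mobdir paths are in use), but none of the ~50-175 byte cells in this run exceed the MOB threshold, so nothing is diverted to a MOB file. A hedged sketch of how such a family is typically declared; the threshold value is an assumption, not this test's actual descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilySketch {
      public static TableDescriptor mobTable() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)          // route oversized cells to MOB files
                .setMobThreshold(100L * 1024) // cells larger than 100 KB become MOB cells
                .build())
            .build();
      }
    }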
2024-12-08T11:20:48,023 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:48,023 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/B, priority=12, startTime=1733656847883; duration=0sec 2024-12-08T11:20:48,024 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:48,024 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:B 2024-12-08T11:20:48,024 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:20:48,026 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:20:48,026 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): e39fb8694c3bacc24718f77e81d6379f/C is initiating minor compaction (all files) 2024-12-08T11:20:48,026 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of e39fb8694c3bacc24718f77e81d6379f/C in TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:48,027 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a351b8c71561470484babae2df4b7c12, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/59bdeb7d247e4beda49d57c3380114f5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/30d4b72205194100b9da7b23ef4e650d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a04a0799140443f9ad8f3cd35ccd4264] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp, totalSize=48.8 K 2024-12-08T11:20:48,027 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a351b8c71561470484babae2df4b7c12, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1733656842441 2024-12-08T11:20:48,027 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 59bdeb7d247e4beda49d57c3380114f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1733656843057 2024-12-08T11:20:48,028 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 30d4b72205194100b9da7b23ef4e650d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=342, earliestPutTs=1733656843599 2024-12-08T11:20:48,028 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a04a0799140443f9ad8f3cd35ccd4264, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1733656845747 2024-12-08T11:20:48,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742070_1246 (size=12454) 2024-12-08T11:20:48,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:48,062 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d906cda2d3af4b4c932e8efc910c0020_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d906cda2d3af4b4c932e8efc910c0020_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:48,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6b646013b5b6425497abc2f317243659, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:48,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6b646013b5b6425497abc2f317243659 is 175, key is test_row_0/A:col10/1733656846471/Put/seqid=0 2024-12-08T11:20:48,071 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#C#compaction#204 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:48,072 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/c0866367a21f448d94cefedb190ed11b is 50, key is test_row_0/C:col10/1733656845747/Put/seqid=0 2024-12-08T11:20:48,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742072_1248 (size=13153) 2024-12-08T11:20:48,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742071_1247 (size=31255) 2024-12-08T11:20:48,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T11:20:48,112 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=379, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6b646013b5b6425497abc2f317243659 2024-12-08T11:20:48,114 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/c0866367a21f448d94cefedb190ed11b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/c0866367a21f448d94cefedb190ed11b 2024-12-08T11:20:48,122 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/C of e39fb8694c3bacc24718f77e81d6379f into c0866367a21f448d94cefedb190ed11b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
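The throughput figures above ("average throughput is ... total limit is 50.00 MB/second") are reported by the pressure-aware compaction throughput controller, which throttles compaction I/O between a lower and an upper bound depending on current pressure. A sketch of the stock configuration keys with illustrative values; this run's actual settings are not shown in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static Configuration throughputConf() {
        Configuration conf = HBaseConfiguration.create();
        // Compaction writes are throttled between these bounds (bytes/second),
        // moving toward the higher bound as flush/compaction pressure rises.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        return conf;
      }
    }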
2024-12-08T11:20:48,122 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:48,122 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/C, priority=12, startTime=1733656847884; duration=0sec 2024-12-08T11:20:48,122 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:48,122 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:C 2024-12-08T11:20:48,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/980ec910bd514278a6a0b7931f073234 is 50, key is test_row_0/B:col10/1733656846471/Put/seqid=0 2024-12-08T11:20:48,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742073_1249 (size=12301) 2024-12-08T11:20:48,184 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/980ec910bd514278a6a0b7931f073234 2024-12-08T11:20:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/0f34c92a5b16434dac2b1c6ef413e22f is 50, key is test_row_0/C:col10/1733656846471/Put/seqid=0 2024-12-08T11:20:48,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742074_1250 (size=12301) 2024-12-08T11:20:48,337 DEBUG [Thread-705 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x367f47f7 to 127.0.0.1:63801 2024-12-08T11:20:48,337 DEBUG [Thread-705 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:48,340 DEBUG [Thread-707 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:63801 2024-12-08T11:20:48,340 DEBUG [Thread-707 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:48,341 DEBUG [Thread-709 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:63801 2024-12-08T11:20:48,341 DEBUG [Thread-709 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:48,343 DEBUG [Thread-711 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:63801 2024-12-08T11:20:48,343 DEBUG [Thread-711 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:48,402 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): e39fb8694c3bacc24718f77e81d6379f#A#compaction#201 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:48,402 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/48114ce252bc4850bceac598f66f9d9a is 175, key is test_row_0/A:col10/1733656845747/Put/seqid=0 2024-12-08T11:20:48,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742075_1251 (size=32107) 2024-12-08T11:20:48,423 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/48114ce252bc4850bceac598f66f9d9a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/48114ce252bc4850bceac598f66f9d9a 2024-12-08T11:20:48,428 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in e39fb8694c3bacc24718f77e81d6379f/A of e39fb8694c3bacc24718f77e81d6379f into 48114ce252bc4850bceac598f66f9d9a(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:48,428 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:48,428 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f., storeName=e39fb8694c3bacc24718f77e81d6379f/A, priority=12, startTime=1733656847882; duration=0sec 2024-12-08T11:20:48,428 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:48,428 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: e39fb8694c3bacc24718f77e81d6379f:A 2024-12-08T11:20:48,611 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
as already flushing 2024-12-08T11:20:48,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:48,611 DEBUG [Thread-702 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46114993 to 127.0.0.1:63801 2024-12-08T11:20:48,612 DEBUG [Thread-702 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:48,618 DEBUG [Thread-694 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c826820 to 127.0.0.1:63801 2024-12-08T11:20:48,619 DEBUG [Thread-694 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:48,657 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/0f34c92a5b16434dac2b1c6ef413e22f 2024-12-08T11:20:48,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/6b646013b5b6425497abc2f317243659 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6b646013b5b6425497abc2f317243659 2024-12-08T11:20:48,668 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6b646013b5b6425497abc2f317243659, entries=150, sequenceid=379, filesize=30.5 K 2024-12-08T11:20:48,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/980ec910bd514278a6a0b7931f073234 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/980ec910bd514278a6a0b7931f073234 2024-12-08T11:20:48,674 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/980ec910bd514278a6a0b7931f073234, entries=150, sequenceid=379, filesize=12.0 K 2024-12-08T11:20:48,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/0f34c92a5b16434dac2b1c6ef413e22f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0f34c92a5b16434dac2b1c6ef413e22f 2024-12-08T11:20:48,682 INFO 
[RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0f34c92a5b16434dac2b1c6ef413e22f, entries=150, sequenceid=379, filesize=12.0 K 2024-12-08T11:20:48,683 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=13.42 KB/13740 for e39fb8694c3bacc24718f77e81d6379f in 755ms, sequenceid=379, compaction requested=false 2024-12-08T11:20:48,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:48,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:48,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-12-08T11:20:48,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-12-08T11:20:48,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-08T11:20:48,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6790 sec 2024-12-08T11:20:48,689 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 1.6850 sec 2024-12-08T11:20:49,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-12-08T11:20:49,113 INFO [Thread-704 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-12-08T11:20:51,212 DEBUG [Thread-696 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2931c73e to 127.0.0.1:63801 2024-12-08T11:20:51,212 DEBUG [Thread-696 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:51,235 DEBUG [Thread-700 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x190853fc to 127.0.0.1:63801 2024-12-08T11:20:51,235 DEBUG [Thread-698 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x176c5c1b to 127.0.0.1:63801 2024-12-08T11:20:51,235 DEBUG [Thread-700 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:51,235 DEBUG [Thread-698 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 72 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5520 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5329 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2378 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7133 rows 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2340 2024-12-08T11:20:51,240 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7017 rows 2024-12-08T11:20:51,240 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T11:20:51,240 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4e560c7b to 127.0.0.1:63801 2024-12-08T11:20:51,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:20:51,246 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-08T11:20:51,249 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T11:20:51,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:51,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T11:20:51,253 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656851253"}]},"ts":"1733656851253"} 2024-12-08T11:20:51,254 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T11:20:51,258 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T11:20:51,259 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T11:20:51,264 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, UNASSIGN}] 2024-12-08T11:20:51,265 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, UNASSIGN 2024-12-08T11:20:51,266 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=e39fb8694c3bacc24718f77e81d6379f, regionState=CLOSING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:51,267 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T11:20:51,267 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:20:51,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T11:20:51,418 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:51,419 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:51,419 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T11:20:51,419 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing e39fb8694c3bacc24718f77e81d6379f, disabling compactions & flushes 2024-12-08T11:20:51,419 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:51,419 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:51,419 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. after waiting 0 ms 2024-12-08T11:20:51,419 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 
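The entries above show the FLUSH procedure (procId 61) for default:TestAcidGuarantees completing and the DisableTableProcedure (pid 63) beginning to unassign region e39fb8694c3bacc24718f77e81d6379f. The following is a minimal sketch, assuming only the standard HBase client Admin API, of how a client could issue the same flush and disable; the table name and ZooKeeper endpoint are taken from this log, while the class name and main method are illustrative, not part of the test tool.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch only: the client-side calls corresponding to the
// FLUSH (procId 61) and DISABLE (pid 63) operations seen in this log.
public class FlushAndDisableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The test cluster in this log exposes ZooKeeper at 127.0.0.1:63801.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "63801");

    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.flush(table);         // flush all regions of the table, as pid 61/62 do here
      admin.disableTable(table);  // unassign the table's regions, as pid 63-66 do here
    }
  }
}
```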
2024-12-08T11:20:51,419 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing e39fb8694c3bacc24718f77e81d6379f 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T11:20:51,420 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=A 2024-12-08T11:20:51,420 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:51,420 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=B 2024-12-08T11:20:51,420 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:51,420 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK e39fb8694c3bacc24718f77e81d6379f, store=C 2024-12-08T11:20:51,420 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:51,427 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087574c4b8f5e4428f80c1729d5d7de1da_e39fb8694c3bacc24718f77e81d6379f is 50, key is test_row_0/A:col10/1733656848617/Put/seqid=0 2024-12-08T11:20:51,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742076_1252 (size=12454) 2024-12-08T11:20:51,434 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:51,438 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087574c4b8f5e4428f80c1729d5d7de1da_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087574c4b8f5e4428f80c1729d5d7de1da_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:51,439 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/b0642f1986aa445f99603184a2e4714c, store: [table=TestAcidGuarantees family=A region=e39fb8694c3bacc24718f77e81d6379f] 2024-12-08T11:20:51,440 DEBUG 
[RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/b0642f1986aa445f99603184a2e4714c is 175, key is test_row_0/A:col10/1733656848617/Put/seqid=0 2024-12-08T11:20:51,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742077_1253 (size=31255) 2024-12-08T11:20:51,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T11:20:51,846 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=390, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/b0642f1986aa445f99603184a2e4714c 2024-12-08T11:20:51,853 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/5e836f198ad7484492658969b6c41111 is 50, key is test_row_0/B:col10/1733656848617/Put/seqid=0 2024-12-08T11:20:51,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T11:20:51,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742078_1254 (size=12301) 2024-12-08T11:20:52,258 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/5e836f198ad7484492658969b6c41111 2024-12-08T11:20:52,265 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/5d5d0c792927461c9983266cbdb58d7a is 50, key is test_row_0/C:col10/1733656848617/Put/seqid=0 2024-12-08T11:20:52,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742079_1255 (size=12301) 2024-12-08T11:20:52,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T11:20:52,670 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/5d5d0c792927461c9983266cbdb58d7a 2024-12-08T11:20:52,675 
DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/A/b0642f1986aa445f99603184a2e4714c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b0642f1986aa445f99603184a2e4714c 2024-12-08T11:20:52,680 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b0642f1986aa445f99603184a2e4714c, entries=150, sequenceid=390, filesize=30.5 K 2024-12-08T11:20:52,681 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/B/5e836f198ad7484492658969b6c41111 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5e836f198ad7484492658969b6c41111 2024-12-08T11:20:52,685 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5e836f198ad7484492658969b6c41111, entries=150, sequenceid=390, filesize=12.0 K 2024-12-08T11:20:52,686 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/.tmp/C/5d5d0c792927461c9983266cbdb58d7a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5d5d0c792927461c9983266cbdb58d7a 2024-12-08T11:20:52,690 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5d5d0c792927461c9983266cbdb58d7a, entries=150, sequenceid=390, filesize=12.0 K 2024-12-08T11:20:52,690 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for e39fb8694c3bacc24718f77e81d6379f in 1271ms, sequenceid=390, compaction requested=true 2024-12-08T11:20:52,691 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6e64c4ca25454ebbaf96fdf922a87091, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/7363d722660b4d7897cbb6a7b21bd6b9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/0082d0b25f3a43f4bef02f4b2999c278, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7dd4fa912224f449b6b91633e515433, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/214d70abf8f84bd9b15ae1e4cc5c111b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/147a859b1b72442ba71a11290139b7f3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/eae8e330f9b643799f5bb7125df6994c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/bb542e3b0a9a45f58c95b60dfd147867, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e9d629c2faad4eb5a15d055335643f79, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/3bb8d22aecc047a4ad9625319223fee7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d33d63d9b738421fb4690b4a47d84e9c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/58c4e3c2b3214f54b9947947b6a8df6b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/c96d462c2f89415a8fd778a2c5c28f55, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/36f0a5abc1094ed48a16d6d87fba4d53, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6aabc2080a194c5c933fed145d34732b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e3df298d85994093bd6cc1441fdd8163, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b774b4e5dbcc442a82b2ed1fb583a7e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/1338df401abe4f109b73e8e2dbf2f428, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d1c9b0096a864c4cbc3dc36bbdfd8ff2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7f2036787d44e6298ea444e2483b95b, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/22418d72c40b48799bf3d91b7a065ed9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/49f34a105afd4ae7b38175d8a30dd101, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/8b213f229d3a47e3bb3db83075f611ce, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/9fad12c37fc7480696506d58853c85a5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/af07cbaae0b141de9ebd6b45218aa455] to archive 2024-12-08T11:20:52,692 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:20:52,694 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6e64c4ca25454ebbaf96fdf922a87091 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6e64c4ca25454ebbaf96fdf922a87091 2024-12-08T11:20:52,695 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/7363d722660b4d7897cbb6a7b21bd6b9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/7363d722660b4d7897cbb6a7b21bd6b9 2024-12-08T11:20:52,696 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/0082d0b25f3a43f4bef02f4b2999c278 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/0082d0b25f3a43f4bef02f4b2999c278 2024-12-08T11:20:52,697 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7dd4fa912224f449b6b91633e515433 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7dd4fa912224f449b6b91633e515433 2024-12-08T11:20:52,698 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/214d70abf8f84bd9b15ae1e4cc5c111b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/214d70abf8f84bd9b15ae1e4cc5c111b 2024-12-08T11:20:52,699 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/147a859b1b72442ba71a11290139b7f3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/147a859b1b72442ba71a11290139b7f3 2024-12-08T11:20:52,700 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/eae8e330f9b643799f5bb7125df6994c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/eae8e330f9b643799f5bb7125df6994c 2024-12-08T11:20:52,701 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/bb542e3b0a9a45f58c95b60dfd147867 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/bb542e3b0a9a45f58c95b60dfd147867 2024-12-08T11:20:52,702 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e9d629c2faad4eb5a15d055335643f79 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e9d629c2faad4eb5a15d055335643f79 2024-12-08T11:20:52,703 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/3bb8d22aecc047a4ad9625319223fee7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/3bb8d22aecc047a4ad9625319223fee7 2024-12-08T11:20:52,712 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d33d63d9b738421fb4690b4a47d84e9c to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d33d63d9b738421fb4690b4a47d84e9c 2024-12-08T11:20:52,714 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/58c4e3c2b3214f54b9947947b6a8df6b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/58c4e3c2b3214f54b9947947b6a8df6b 2024-12-08T11:20:52,715 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/c96d462c2f89415a8fd778a2c5c28f55 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/c96d462c2f89415a8fd778a2c5c28f55 2024-12-08T11:20:52,717 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/36f0a5abc1094ed48a16d6d87fba4d53 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/36f0a5abc1094ed48a16d6d87fba4d53 2024-12-08T11:20:52,719 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6aabc2080a194c5c933fed145d34732b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6aabc2080a194c5c933fed145d34732b 2024-12-08T11:20:52,720 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e3df298d85994093bd6cc1441fdd8163 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/e3df298d85994093bd6cc1441fdd8163 2024-12-08T11:20:52,721 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b774b4e5dbcc442a82b2ed1fb583a7e8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b774b4e5dbcc442a82b2ed1fb583a7e8 2024-12-08T11:20:52,722 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/1338df401abe4f109b73e8e2dbf2f428 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/1338df401abe4f109b73e8e2dbf2f428 2024-12-08T11:20:52,723 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d1c9b0096a864c4cbc3dc36bbdfd8ff2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/d1c9b0096a864c4cbc3dc36bbdfd8ff2 2024-12-08T11:20:52,724 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7f2036787d44e6298ea444e2483b95b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/f7f2036787d44e6298ea444e2483b95b 2024-12-08T11:20:52,725 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/22418d72c40b48799bf3d91b7a065ed9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/22418d72c40b48799bf3d91b7a065ed9 2024-12-08T11:20:52,726 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/49f34a105afd4ae7b38175d8a30dd101 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/49f34a105afd4ae7b38175d8a30dd101 2024-12-08T11:20:52,727 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/8b213f229d3a47e3bb3db83075f611ce to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/8b213f229d3a47e3bb3db83075f611ce 2024-12-08T11:20:52,731 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/9fad12c37fc7480696506d58853c85a5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/9fad12c37fc7480696506d58853c85a5 2024-12-08T11:20:52,733 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/af07cbaae0b141de9ebd6b45218aa455 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/af07cbaae0b141de9ebd6b45218aa455 2024-12-08T11:20:52,736 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3bad87a005d34384aff2c70538d23fe3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3056591f8d634817bcaebb645c654e55, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/2177e16ef18c491f90b90dd625cea51c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7f62da32e92c4891bd3b7a18582abe24, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/755215a241ef4a61bb3f5e028892033d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d73d5fc120424c62aaca220f02b86f95, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5615edfca6bc44ec8e72d4468a89a62c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/363c66006107457abac3c0ad1b29e0a3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f79b0ca04b6a4946ab8db05d1c4f6a88, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d92d90b2825c471ab0f767a2080cee40, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/848aee9193a54fe7aea62c3587e0bc9a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/dc37d8162b2e45e9ba3461ce6bb30661, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b81555196ccb42cf859372bb7b0fe0a4, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3d574b1d2fbe4961b658eabfbd56c065, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7dc8fcfc8f45463cba249e35cec7130f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/a789cea8836c499bbfec46d4b0fdb557, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/494b18249d8d4c208122ff962e969416, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3cd8607f74db4312a07130e062f0e4e7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/1e3b4b59e794488b85274b6be45d286c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d85ae914356d4f179c6d01b0c418a11f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/af5cd8cd98c74773ac452fd0c4e96002, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f4d2107bb3694cdc8836c565aaedce68, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/05bdb5bfaa8b437b93b296b1545e1ddf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b2bdfce06b444543b4ade288bcc1286f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/9f0ae3c94e9345308aa5948fee289d93] to archive 2024-12-08T11:20:52,737 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
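The StoreCloser entries around this point show HFileArchiver moving each compacted store file from the table's data tree into the parallel archive tree, i.e. from .../data/default/TestAcidGuarantees/<region>/<family>/<hfile> to .../archive/data/default/TestAcidGuarantees/<region>/<family>/<hfile>. Below is a small sketch of that path mapping as it appears in these log lines; the helper name is hypothetical and this is not the HFileArchiver implementation, which additionally handles directory creation, name collisions, and removal of the source file.

```java
import org.apache.hadoop.fs.Path;

// Illustrative only: reproduces the data -> archive path mapping visible in the
// HFileArchiver entries of this log.
public class ArchivePathExample {
  /** Rebuild the archive location of a store file that lives under the HBase root dir. */
  static Path toArchivePath(Path rootDir, Path storeFile) {
    // e.g. rootDir   = hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c
    //      storeFile = <rootDir>/data/default/TestAcidGuarantees/<region>/B/<hfile>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // "data/default/TestAcidGuarantees/..."
    return new Path(new Path(rootDir, "archive"), relative); // "<rootDir>/archive/data/default/..."
  }
}
```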
2024-12-08T11:20:52,739 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3bad87a005d34384aff2c70538d23fe3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3bad87a005d34384aff2c70538d23fe3 2024-12-08T11:20:52,741 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3056591f8d634817bcaebb645c654e55 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3056591f8d634817bcaebb645c654e55 2024-12-08T11:20:52,743 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/2177e16ef18c491f90b90dd625cea51c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/2177e16ef18c491f90b90dd625cea51c 2024-12-08T11:20:52,745 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7f62da32e92c4891bd3b7a18582abe24 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7f62da32e92c4891bd3b7a18582abe24 2024-12-08T11:20:52,746 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/755215a241ef4a61bb3f5e028892033d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/755215a241ef4a61bb3f5e028892033d 2024-12-08T11:20:52,747 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d73d5fc120424c62aaca220f02b86f95 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d73d5fc120424c62aaca220f02b86f95 2024-12-08T11:20:52,748 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5615edfca6bc44ec8e72d4468a89a62c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5615edfca6bc44ec8e72d4468a89a62c 2024-12-08T11:20:52,749 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/363c66006107457abac3c0ad1b29e0a3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/363c66006107457abac3c0ad1b29e0a3 2024-12-08T11:20:52,750 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f79b0ca04b6a4946ab8db05d1c4f6a88 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f79b0ca04b6a4946ab8db05d1c4f6a88 2024-12-08T11:20:52,752 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d92d90b2825c471ab0f767a2080cee40 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d92d90b2825c471ab0f767a2080cee40 2024-12-08T11:20:52,753 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/848aee9193a54fe7aea62c3587e0bc9a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/848aee9193a54fe7aea62c3587e0bc9a 2024-12-08T11:20:52,754 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/dc37d8162b2e45e9ba3461ce6bb30661 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/dc37d8162b2e45e9ba3461ce6bb30661 2024-12-08T11:20:52,755 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b81555196ccb42cf859372bb7b0fe0a4 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b81555196ccb42cf859372bb7b0fe0a4 2024-12-08T11:20:52,756 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3d574b1d2fbe4961b658eabfbd56c065 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3d574b1d2fbe4961b658eabfbd56c065 2024-12-08T11:20:52,757 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7dc8fcfc8f45463cba249e35cec7130f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/7dc8fcfc8f45463cba249e35cec7130f 2024-12-08T11:20:52,758 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/a789cea8836c499bbfec46d4b0fdb557 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/a789cea8836c499bbfec46d4b0fdb557 2024-12-08T11:20:52,759 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/494b18249d8d4c208122ff962e969416 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/494b18249d8d4c208122ff962e969416 2024-12-08T11:20:52,760 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3cd8607f74db4312a07130e062f0e4e7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/3cd8607f74db4312a07130e062f0e4e7 2024-12-08T11:20:52,761 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/1e3b4b59e794488b85274b6be45d286c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/1e3b4b59e794488b85274b6be45d286c 2024-12-08T11:20:52,762 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d85ae914356d4f179c6d01b0c418a11f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/d85ae914356d4f179c6d01b0c418a11f 2024-12-08T11:20:52,763 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/af5cd8cd98c74773ac452fd0c4e96002 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/af5cd8cd98c74773ac452fd0c4e96002 2024-12-08T11:20:52,764 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f4d2107bb3694cdc8836c565aaedce68 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/f4d2107bb3694cdc8836c565aaedce68 2024-12-08T11:20:52,765 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/05bdb5bfaa8b437b93b296b1545e1ddf to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/05bdb5bfaa8b437b93b296b1545e1ddf 2024-12-08T11:20:52,766 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b2bdfce06b444543b4ade288bcc1286f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/b2bdfce06b444543b4ade288bcc1286f 2024-12-08T11:20:52,767 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/9f0ae3c94e9345308aa5948fee289d93 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/9f0ae3c94e9345308aa5948fee289d93 2024-12-08T11:20:52,769 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2ed4837e751a4518aeacea48c10b7923, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0031b815701048d3af7f758a99e35768, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/cb6e929a991d48cab4fa8094647316a9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5fa64daf04a145dd8785629745968a87, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/6e63bfefe598485eb8835e3a0b1c22f4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5ac7b5b561dd45dd9bb8408549bf997d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a47d443b6ce34b9ea2d6f791f00b2fa0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f4d0ce8d2a3e43ae92638ef05b334282, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/e71372bab9944bcdb0fee65e45de724f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/8c641353bc92468a9067a8f8ec63eb74, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2660455af2434fb7866202b144ab9cf0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/817d89d220564147aeb6278d79f00240, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/7beef68a7e464024946e88864158e6db, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/ead76508fb4d4ddaae8b57c2bc74306c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/180943d730024c9e9ccb3501b94c5e9f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f3d2a8a5c5da4a3d8bf9ceb384219ff4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/88916ab748fa48ad86cee75730a95ddf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/48e0e40eed504e6eaf46a86f2db41006, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/71777066f7ad48d6b0ffd0a02ca51482, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/32872f1588a444f7bad84cfeeee48005, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a351b8c71561470484babae2df4b7c12, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/72fae324842d44248edc4fdc92a3ab06, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/59bdeb7d247e4beda49d57c3380114f5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/30d4b72205194100b9da7b23ef4e650d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a04a0799140443f9ad8f3cd35ccd4264] to archive 2024-12-08T11:20:52,770 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:20:52,772 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2ed4837e751a4518aeacea48c10b7923 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2ed4837e751a4518aeacea48c10b7923 2024-12-08T11:20:52,773 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0031b815701048d3af7f758a99e35768 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0031b815701048d3af7f758a99e35768 2024-12-08T11:20:52,775 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/cb6e929a991d48cab4fa8094647316a9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/cb6e929a991d48cab4fa8094647316a9 2024-12-08T11:20:52,776 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5fa64daf04a145dd8785629745968a87 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5fa64daf04a145dd8785629745968a87 2024-12-08T11:20:52,777 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/6e63bfefe598485eb8835e3a0b1c22f4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/6e63bfefe598485eb8835e3a0b1c22f4 2024-12-08T11:20:52,778 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5ac7b5b561dd45dd9bb8408549bf997d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5ac7b5b561dd45dd9bb8408549bf997d 2024-12-08T11:20:52,784 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a47d443b6ce34b9ea2d6f791f00b2fa0 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a47d443b6ce34b9ea2d6f791f00b2fa0 2024-12-08T11:20:52,785 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f4d0ce8d2a3e43ae92638ef05b334282 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f4d0ce8d2a3e43ae92638ef05b334282 2024-12-08T11:20:52,786 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/e71372bab9944bcdb0fee65e45de724f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/e71372bab9944bcdb0fee65e45de724f 2024-12-08T11:20:52,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/8c641353bc92468a9067a8f8ec63eb74 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/8c641353bc92468a9067a8f8ec63eb74 2024-12-08T11:20:52,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2660455af2434fb7866202b144ab9cf0 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/2660455af2434fb7866202b144ab9cf0 2024-12-08T11:20:52,790 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/817d89d220564147aeb6278d79f00240 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/817d89d220564147aeb6278d79f00240 2024-12-08T11:20:52,791 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/7beef68a7e464024946e88864158e6db to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/7beef68a7e464024946e88864158e6db 2024-12-08T11:20:52,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/ead76508fb4d4ddaae8b57c2bc74306c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/ead76508fb4d4ddaae8b57c2bc74306c 2024-12-08T11:20:52,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/180943d730024c9e9ccb3501b94c5e9f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/180943d730024c9e9ccb3501b94c5e9f 2024-12-08T11:20:52,797 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f3d2a8a5c5da4a3d8bf9ceb384219ff4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/f3d2a8a5c5da4a3d8bf9ceb384219ff4 2024-12-08T11:20:52,798 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/88916ab748fa48ad86cee75730a95ddf to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/88916ab748fa48ad86cee75730a95ddf 2024-12-08T11:20:52,800 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/48e0e40eed504e6eaf46a86f2db41006 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/48e0e40eed504e6eaf46a86f2db41006 2024-12-08T11:20:52,802 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/71777066f7ad48d6b0ffd0a02ca51482 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/71777066f7ad48d6b0ffd0a02ca51482 2024-12-08T11:20:52,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/32872f1588a444f7bad84cfeeee48005 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/32872f1588a444f7bad84cfeeee48005 2024-12-08T11:20:52,806 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a351b8c71561470484babae2df4b7c12 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a351b8c71561470484babae2df4b7c12 2024-12-08T11:20:52,809 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/72fae324842d44248edc4fdc92a3ab06 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/72fae324842d44248edc4fdc92a3ab06 2024-12-08T11:20:52,812 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/59bdeb7d247e4beda49d57c3380114f5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/59bdeb7d247e4beda49d57c3380114f5 2024-12-08T11:20:52,814 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/30d4b72205194100b9da7b23ef4e650d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/30d4b72205194100b9da7b23ef4e650d 2024-12-08T11:20:52,815 DEBUG [StoreCloser-TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a04a0799140443f9ad8f3cd35ccd4264 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/a04a0799140443f9ad8f3cd35ccd4264 2024-12-08T11:20:52,829 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/recovered.edits/393.seqid, newMaxSeqId=393, maxSeqId=4 2024-12-08T11:20:52,830 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f. 2024-12-08T11:20:52,830 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for e39fb8694c3bacc24718f77e81d6379f: 2024-12-08T11:20:52,832 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:52,833 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=e39fb8694c3bacc24718f77e81d6379f, regionState=CLOSED 2024-12-08T11:20:52,835 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-08T11:20:52,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure e39fb8694c3bacc24718f77e81d6379f, server=355ef6e50110,46083,1733656795491 in 1.5670 sec 2024-12-08T11:20:52,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-08T11:20:52,838 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=e39fb8694c3bacc24718f77e81d6379f, UNASSIGN in 1.5720 sec 2024-12-08T11:20:52,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-08T11:20:52,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5810 sec 2024-12-08T11:20:52,841 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656852841"}]},"ts":"1733656852841"} 2024-12-08T11:20:52,842 INFO 
[PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T11:20:52,844 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T11:20:52,845 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5960 sec 2024-12-08T11:20:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-08T11:20:53,357 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-08T11:20:53,357 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T11:20:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:53,359 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:53,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-08T11:20:53,360 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:53,362 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,364 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/recovered.edits] 2024-12-08T11:20:53,366 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/48114ce252bc4850bceac598f66f9d9a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/48114ce252bc4850bceac598f66f9d9a 2024-12-08T11:20:53,367 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6b646013b5b6425497abc2f317243659 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/6b646013b5b6425497abc2f317243659 2024-12-08T11:20:53,368 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b0642f1986aa445f99603184a2e4714c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/A/b0642f1986aa445f99603184a2e4714c 2024-12-08T11:20:53,370 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5e836f198ad7484492658969b6c41111 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/5e836f198ad7484492658969b6c41111 2024-12-08T11:20:53,371 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/980ec910bd514278a6a0b7931f073234 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/980ec910bd514278a6a0b7931f073234 2024-12-08T11:20:53,373 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/a1a3813f3fe94fc2847dbdd37f82e678 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/B/a1a3813f3fe94fc2847dbdd37f82e678 2024-12-08T11:20:53,375 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0f34c92a5b16434dac2b1c6ef413e22f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/0f34c92a5b16434dac2b1c6ef413e22f 2024-12-08T11:20:53,376 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5d5d0c792927461c9983266cbdb58d7a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/5d5d0c792927461c9983266cbdb58d7a 2024-12-08T11:20:53,377 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/c0866367a21f448d94cefedb190ed11b to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/C/c0866367a21f448d94cefedb190ed11b 2024-12-08T11:20:53,380 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/recovered.edits/393.seqid to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f/recovered.edits/393.seqid 2024-12-08T11:20:53,381 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,381 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T11:20:53,381 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T11:20:53,382 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-08T11:20:53,386 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208112b53ad5ee143bf806a77cc13d0e4f2_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208112b53ad5ee143bf806a77cc13d0e4f2_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,387 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120813ed2350e4574c9ab50184aa4e42c3ec_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120813ed2350e4574c9ab50184aa4e42c3ec_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,389 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081737ac8d91a641099368cc80b44c7d0f_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081737ac8d91a641099368cc80b44c7d0f_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,390 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082eaadcce464047ccbf21fcf01e86f0c3_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082eaadcce464047ccbf21fcf01e86f0c3_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,391 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120832efe4fdc86946ae9e19a9d6651e50f4_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120832efe4fdc86946ae9e19a9d6651e50f4_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,392 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120836e13d436db9442fa8f6efb49015abf1_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120836e13d436db9442fa8f6efb49015abf1_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,393 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120849502b4568da4395a484c23de0ac432a_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120849502b4568da4395a484c23de0ac432a_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,394 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084f4123cae11f4d16975e2057438b338c_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412084f4123cae11f4d16975e2057438b338c_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,395 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086c5aeb6e72784fca845a181323f82342_e39fb8694c3bacc24718f77e81d6379f to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086c5aeb6e72784fca845a181323f82342_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,397 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086f4a8e5848274b5c88b27e1c5467c10c_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086f4a8e5848274b5c88b27e1c5467c10c_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,398 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087574c4b8f5e4428f80c1729d5d7de1da_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087574c4b8f5e4428f80c1729d5d7de1da_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,399 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087ad28a6a0b7349cc9f258375b66e3ad5_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087ad28a6a0b7349cc9f258375b66e3ad5_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,400 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089324cfe12855485ab03908d834895082_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089324cfe12855485ab03908d834895082_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,401 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120898f163944c86405ea34959d84f837299_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120898f163944c86405ea34959d84f837299_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,402 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a010488537404511a2fa8c235d25059f_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a010488537404511a2fa8c235d25059f_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,404 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c046602954a745b1ad975fb46099a42b_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c046602954a745b1ad975fb46099a42b_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,405 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c2335dc8508b4f76902f4c4187bd6c0e_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c2335dc8508b4f76902f4c4187bd6c0e_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,406 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d906cda2d3af4b4c932e8efc910c0020_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d906cda2d3af4b4c932e8efc910c0020_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,407 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f42792ab630f459e8c1c45f0f31d0eea_e39fb8694c3bacc24718f77e81d6379f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f42792ab630f459e8c1c45f0f31d0eea_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,408 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f5e43491b2bb4b19919eb74e895955e3_e39fb8694c3bacc24718f77e81d6379f to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f5e43491b2bb4b19919eb74e895955e3_e39fb8694c3bacc24718f77e81d6379f 2024-12-08T11:20:53,409 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T11:20:53,411 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:53,413 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T11:20:53,416 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-08T11:20:53,416 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:53,416 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T11:20:53,417 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733656853416"}]},"ts":"9223372036854775807"} 2024-12-08T11:20:53,419 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T11:20:53,419 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => e39fb8694c3bacc24718f77e81d6379f, NAME => 'TestAcidGuarantees,,1733656825212.e39fb8694c3bacc24718f77e81d6379f.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T11:20:53,419 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-08T11:20:53,419 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733656853419"}]},"ts":"9223372036854775807"} 2024-12-08T11:20:53,421 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T11:20:53,426 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:53,427 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 69 msec 2024-12-08T11:20:53,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-08T11:20:53,461 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-08T11:20:53,470 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=243 (was 239) Potentially hanging thread: hconnection-0x15cb16c5-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-858952241_22 at /127.0.0.1:43252 [Waiting for operation #680] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x15cb16c5-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x15cb16c5-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x15cb16c5-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1577542464_22 at /127.0.0.1:43370 [Waiting for operation #671] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=456 (was 455) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=515 (was 435) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=7136 (was 6512) - AvailableMemoryMB LEAK? - 2024-12-08T11:20:53,481 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=243, OpenFileDescriptor=456, MaxFileDescriptor=1048576, SystemLoadAverage=515, ProcessCount=11, AvailableMemoryMB=7135 2024-12-08T11:20:53,482 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-08T11:20:53,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:20:53,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T11:20:53,484 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T11:20:53,485 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:53,485 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-12-08T11:20:53,485 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T11:20:53,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T11:20:53,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742080_1256 (size=963) 2024-12-08T11:20:53,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T11:20:53,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T11:20:53,828 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-08T11:20:53,893 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c 2024-12-08T11:20:53,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742081_1257 (size=53) 2024-12-08T11:20:54,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T11:20:54,301 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:20:54,301 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing eada905a64e5510a34d2159c0d8947a2, disabling compactions & flushes 2024-12-08T11:20:54,301 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:54,301 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:54,301 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. after waiting 0 ms 2024-12-08T11:20:54,301 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:54,301 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:20:54,302 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:54,303 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T11:20:54,303 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733656854303"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733656854303"}]},"ts":"1733656854303"} 2024-12-08T11:20:54,304 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T11:20:54,305 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T11:20:54,305 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656854305"}]},"ts":"1733656854305"} 2024-12-08T11:20:54,306 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T11:20:54,310 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=eada905a64e5510a34d2159c0d8947a2, ASSIGN}] 2024-12-08T11:20:54,311 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=eada905a64e5510a34d2159c0d8947a2, ASSIGN 2024-12-08T11:20:54,312 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=eada905a64e5510a34d2159c0d8947a2, ASSIGN; state=OFFLINE, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=false 2024-12-08T11:20:54,462 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=eada905a64e5510a34d2159c0d8947a2, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:54,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:20:54,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T11:20:54,615 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:54,618 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:20:54,618 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:20:54,618 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:54,618 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:20:54,618 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:54,619 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:54,620 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:54,621 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:54,621 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eada905a64e5510a34d2159c0d8947a2 columnFamilyName A 2024-12-08T11:20:54,621 DEBUG [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:54,622 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] regionserver.HStore(327): Store=eada905a64e5510a34d2159c0d8947a2/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:54,622 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:54,623 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:54,623 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eada905a64e5510a34d2159c0d8947a2 columnFamilyName B 2024-12-08T11:20:54,623 DEBUG [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:54,623 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] regionserver.HStore(327): Store=eada905a64e5510a34d2159c0d8947a2/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:54,623 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:54,624 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:20:54,624 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eada905a64e5510a34d2159c0d8947a2 columnFamilyName C 2024-12-08T11:20:54,625 DEBUG [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:20:54,625 INFO [StoreOpener-eada905a64e5510a34d2159c0d8947a2-1 {}] regionserver.HStore(327): Store=eada905a64e5510a34d2159c0d8947a2/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:20:54,625 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:54,626 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:54,626 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:54,627 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T11:20:54,628 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:54,630 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T11:20:54,630 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened eada905a64e5510a34d2159c0d8947a2; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58871390, jitterRate=-0.12274792790412903}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:20:54,631 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:54,631 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., pid=70, masterSystemTime=1733656854615 2024-12-08T11:20:54,632 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:54,632 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:20:54,633 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=eada905a64e5510a34d2159c0d8947a2, regionState=OPEN, openSeqNum=2, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:20:54,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-08T11:20:54,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 in 171 msec 2024-12-08T11:20:54,637 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-08T11:20:54,637 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=eada905a64e5510a34d2159c0d8947a2, ASSIGN in 325 msec 2024-12-08T11:20:54,637 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T11:20:54,637 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656854637"}]},"ts":"1733656854637"} 2024-12-08T11:20:54,638 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T11:20:54,640 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T11:20:54,641 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1570 sec 2024-12-08T11:20:55,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-08T11:20:55,590 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-12-08T11:20:55,591 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x783a99f7 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f810aa9 2024-12-08T11:20:55,594 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b70f48f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,596 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,597 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34752, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,598 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T11:20:55,599 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39818, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T11:20:55,602 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53bfce45 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64dc42d9 2024-12-08T11:20:55,604 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58341641, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,605 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a569490 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c1ac389 2024-12-08T11:20:55,609 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44645c55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,610 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6862e3ce to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28e73c0 2024-12-08T11:20:55,613 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ee0130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,614 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d296fed to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c480dfb 2024-12-08T11:20:55,616 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683b64c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,617 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-12-08T11:20:55,624 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,626 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e9ae050 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a703d2 2024-12-08T11:20:55,628 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17cf7fc0, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,629 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fef31f8 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14ed1e44 2024-12-08T11:20:55,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b04266, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,633 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0eb04aeb to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72537a47 2024-12-08T11:20:55,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88aa519, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,637 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-12-08T11:20:55,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,643 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-12-08T11:20:55,646 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:20:55,653 DEBUG [hconnection-0x5d197597-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,654 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34768, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,656 DEBUG [hconnection-0x4f0c7ed9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,657 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,668 DEBUG [hconnection-0x3a5a211b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,669 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34788, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:55,683 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:20:55,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:20:55,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:55,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:20:55,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:55,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:20:55,683 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:55,692 DEBUG [hconnection-0x46e4ce2a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,693 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34804, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,705 DEBUG [hconnection-0x42b6a9aa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,706 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34806, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,723 DEBUG [hconnection-0x2c9198e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,725 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,734 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:55,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-08T11:20:55,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T11:20:55,736 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:55,736 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:55,737 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:55,742 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/7f53e4716b38485f842bbd9f5a710ad4 is 50, key is test_row_0/A:col10/1733656855682/Put/seqid=0 2024-12-08T11:20:55,744 DEBUG [hconnection-0x16c535ad-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,744 DEBUG [hconnection-0x6e8bb779-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,745 DEBUG [hconnection-0x532e5821-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,745 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34814, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,745 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34812, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,746 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,750 DEBUG [hconnection-0x63cfb1e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:20:55,751 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34824, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:20:55,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742082_1258 (size=12001) 2024-12-08T11:20:55,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656915767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656915767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656915767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656915769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656915769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T11:20:55,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656915871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656915872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656915872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,873 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656915872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:55,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656915872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,888 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:55,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-08T11:20:55,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:55,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:20:55,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:55,889 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:55,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:55,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:56,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T11:20:56,041 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-08T11:20:56,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:56,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:20:56,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:56,042 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:56,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:56,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:56,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656916074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656916074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656916075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656916075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656916076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/7f53e4716b38485f842bbd9f5a710ad4 2024-12-08T11:20:56,195 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-08T11:20:56,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:56,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:20:56,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:56,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:56,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:56,197 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/ccd5cf83bdb44288b2274fa8fc4d81f8 is 50, key is test_row_0/B:col10/1733656855682/Put/seqid=0 2024-12-08T11:20:56,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:56,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742083_1259 (size=12001) 2024-12-08T11:20:56,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/ccd5cf83bdb44288b2274fa8fc4d81f8 2024-12-08T11:20:56,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/81424e84f7414643b08c2280811f795f is 50, key is test_row_0/C:col10/1733656855682/Put/seqid=0 2024-12-08T11:20:56,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742084_1260 (size=12001) 2024-12-08T11:20:56,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/81424e84f7414643b08c2280811f795f 2024-12-08T11:20:56,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/7f53e4716b38485f842bbd9f5a710ad4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7f53e4716b38485f842bbd9f5a710ad4 2024-12-08T11:20:56,321 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7f53e4716b38485f842bbd9f5a710ad4, entries=150, sequenceid=12, filesize=11.7 K 2024-12-08T11:20:56,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/ccd5cf83bdb44288b2274fa8fc4d81f8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ccd5cf83bdb44288b2274fa8fc4d81f8 2024-12-08T11:20:56,326 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ccd5cf83bdb44288b2274fa8fc4d81f8, entries=150, sequenceid=12, filesize=11.7 K 2024-12-08T11:20:56,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/81424e84f7414643b08c2280811f795f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/81424e84f7414643b08c2280811f795f 2024-12-08T11:20:56,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/81424e84f7414643b08c2280811f795f, entries=150, sequenceid=12, filesize=11.7 K 2024-12-08T11:20:56,332 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for eada905a64e5510a34d2159c0d8947a2 in 649ms, sequenceid=12, compaction requested=false 2024-12-08T11:20:56,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:56,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T11:20:56,349 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,349 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-08T11:20:56,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:20:56,350 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:20:56,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:20:56,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:56,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:20:56,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:56,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:20:56,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:56,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/142c0f5788934c6aa5cdb2776cd3d2b5 is 50, key is test_row_0/A:col10/1733656855767/Put/seqid=0 2024-12-08T11:20:56,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:56,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:20:56,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656916384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742085_1261 (size=12001) 2024-12-08T11:20:56,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656916386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656916387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,391 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/142c0f5788934c6aa5cdb2776cd3d2b5 2024-12-08T11:20:56,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656916387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656916388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/dd4224e3816f42648f8876aace388fe8 is 50, key is test_row_0/B:col10/1733656855767/Put/seqid=0 2024-12-08T11:20:56,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742086_1262 (size=12001) 2024-12-08T11:20:56,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656916488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656916491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656916492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656916492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656916492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,694 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656916692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656916693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656916696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656916697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656916698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T11:20:56,865 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/dd4224e3816f42648f8876aace388fe8 2024-12-08T11:20:56,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/a273155288754b1e8617d7e3bda21e17 is 50, key is test_row_0/C:col10/1733656855767/Put/seqid=0 2024-12-08T11:20:56,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742087_1263 (size=12001) 2024-12-08T11:20:56,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656916996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:56,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:56,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656916997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656916999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656916999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656917002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,345 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/a273155288754b1e8617d7e3bda21e17 2024-12-08T11:20:57,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/142c0f5788934c6aa5cdb2776cd3d2b5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/142c0f5788934c6aa5cdb2776cd3d2b5 2024-12-08T11:20:57,365 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/142c0f5788934c6aa5cdb2776cd3d2b5, entries=150, sequenceid=37, filesize=11.7 K 2024-12-08T11:20:57,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/dd4224e3816f42648f8876aace388fe8 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/dd4224e3816f42648f8876aace388fe8
2024-12-08T11:20:57,370 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/dd4224e3816f42648f8876aace388fe8, entries=150, sequenceid=37, filesize=11.7 K
2024-12-08T11:20:57,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/a273155288754b1e8617d7e3bda21e17 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/a273155288754b1e8617d7e3bda21e17
2024-12-08T11:20:57,378 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/a273155288754b1e8617d7e3bda21e17, entries=150, sequenceid=37, filesize=11.7 K
2024-12-08T11:20:57,379 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for eada905a64e5510a34d2159c0d8947a2 in 1030ms, sequenceid=37, compaction requested=false
2024-12-08T11:20:57,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2:
2024-12-08T11:20:57,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.
2024-12-08T11:20:57,379 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72
2024-12-08T11:20:57,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=72
2024-12-08T11:20:57,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71
2024-12-08T11:20:57,383 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6450 sec
2024-12-08T11:20:57,386 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.6500 sec
2024-12-08T11:20:57,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2
2024-12-08T11:20:57,505 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB
2024-12-08T11:20:57,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A
2024-12-08T11:20:57,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:20:57,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B
2024-12-08T11:20:57,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:20:57,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C
2024-12-08T11:20:57,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:20:57,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/5729e124e0bf493d9fc6654d2e103c18 is 50, key is test_row_0/A:col10/1733656857504/Put/seqid=0
2024-12-08T11:20:57,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656917524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656917524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656917525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656917528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656917528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742088_1264 (size=12001) 2024-12-08T11:20:57,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/5729e124e0bf493d9fc6654d2e103c18 2024-12-08T11:20:57,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/082040eae6c74b78a6da0e6035580111 is 50, key is test_row_0/B:col10/1733656857504/Put/seqid=0 2024-12-08T11:20:57,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742089_1265 (size=12001) 2024-12-08T11:20:57,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656917630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656917630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656917632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656917632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656917635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656917835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656917836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656917838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:57,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656917838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:57,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-08T11:20:57,840 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-08T11:20:57,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:57,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-08T11:20:57,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-08T11:20:57,845 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:57,846 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T11:20:57,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656917844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491
2024-12-08T11:20:57,848 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T11:20:57,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T11:20:57,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73
2024-12-08T11:20:57,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/082040eae6c74b78a6da0e6035580111
2024-12-08T11:20:58,000 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491
2024-12-08T11:20:58,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74
2024-12-08T11:20:58,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.
2024-12-08T11:20:58,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing
2024-12-08T11:20:58,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.
2024-12-08T11:20:58,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T11:20:58,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-08T11:20:58,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:58,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/415ce065b02f420b9c845b1d99000e01 is 50, key is test_row_0/C:col10/1733656857504/Put/seqid=0 2024-12-08T11:20:58,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742090_1266 (size=12001) 2024-12-08T11:20:58,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656918137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656918139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656918141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656918142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-08T11:20:58,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656918150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,153 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-08T11:20:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:20:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:58,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:58,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:58,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:58,306 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-08T11:20:58,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:58,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:20:58,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:58,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:58,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:58,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:58,369 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T11:20:58,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/415ce065b02f420b9c845b1d99000e01 2024-12-08T11:20:58,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/5729e124e0bf493d9fc6654d2e103c18 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/5729e124e0bf493d9fc6654d2e103c18 2024-12-08T11:20:58,427 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/5729e124e0bf493d9fc6654d2e103c18, entries=150, sequenceid=51, filesize=11.7 K 2024-12-08T11:20:58,428 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/082040eae6c74b78a6da0e6035580111 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/082040eae6c74b78a6da0e6035580111 2024-12-08T11:20:58,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/082040eae6c74b78a6da0e6035580111, entries=150, sequenceid=51, filesize=11.7 K 2024-12-08T11:20:58,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/415ce065b02f420b9c845b1d99000e01 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/415ce065b02f420b9c845b1d99000e01 2024-12-08T11:20:58,437 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/415ce065b02f420b9c845b1d99000e01, entries=150, sequenceid=51, filesize=11.7 K 2024-12-08T11:20:58,438 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-12-08T11:20:58,438 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for eada905a64e5510a34d2159c0d8947a2 in 933ms, sequenceid=51, compaction requested=true 2024-12-08T11:20:58,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:58,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:58,439 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T11:20:58,439 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:58,439 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:58,440 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:58,440 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:58,440 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:20:58,440 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:20:58,440 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:58,440 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:20:58,440 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ccd5cf83bdb44288b2274fa8fc4d81f8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/dd4224e3816f42648f8876aace388fe8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/082040eae6c74b78a6da0e6035580111] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=35.2 K 2024-12-08T11:20:58,440 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7f53e4716b38485f842bbd9f5a710ad4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/142c0f5788934c6aa5cdb2776cd3d2b5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/5729e124e0bf493d9fc6654d2e103c18] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=35.2 K 2024-12-08T11:20:58,441 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ccd5cf83bdb44288b2274fa8fc4d81f8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733656855660 2024-12-08T11:20:58,441 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f53e4716b38485f842bbd9f5a710ad4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733656855660 2024-12-08T11:20:58,441 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 142c0f5788934c6aa5cdb2776cd3d2b5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733656855761 2024-12-08T11:20:58,441 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting dd4224e3816f42648f8876aace388fe8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733656855761 2024-12-08T11:20:58,442 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 082040eae6c74b78a6da0e6035580111, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733656856386 2024-12-08T11:20:58,442 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5729e124e0bf493d9fc6654d2e103c18, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733656856386 2024-12-08T11:20:58,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-08T11:20:58,459 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-08T11:20:58,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:58,460 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T11:20:58,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:20:58,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:58,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:20:58,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:58,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:20:58,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:58,464 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#219 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:58,465 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/632dd9338341439ba4fac1596a72e0c8 is 50, key is test_row_0/B:col10/1733656857504/Put/seqid=0 2024-12-08T11:20:58,468 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#220 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:58,469 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/8ea1da108578496e8e264d31f4d827f6 is 50, key is test_row_0/A:col10/1733656857504/Put/seqid=0 2024-12-08T11:20:58,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/2a9b541f4bb14b06b7d8ceaf4fdeed45 is 50, key is test_row_0/A:col10/1733656857523/Put/seqid=0 2024-12-08T11:20:58,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742091_1267 (size=12104) 2024-12-08T11:20:58,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742092_1268 (size=12104) 2024-12-08T11:20:58,492 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/632dd9338341439ba4fac1596a72e0c8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/632dd9338341439ba4fac1596a72e0c8 2024-12-08T11:20:58,497 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/8ea1da108578496e8e264d31f4d827f6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8ea1da108578496e8e264d31f4d827f6 2024-12-08T11:20:58,503 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into 632dd9338341439ba4fac1596a72e0c8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:58,503 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:58,503 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=13, startTime=1733656858439; duration=0sec 2024-12-08T11:20:58,503 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:58,503 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:20:58,503 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:58,504 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:58,504 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:20:58,504 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:58,504 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/81424e84f7414643b08c2280811f795f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/a273155288754b1e8617d7e3bda21e17, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/415ce065b02f420b9c845b1d99000e01] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=35.2 K 2024-12-08T11:20:58,505 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 81424e84f7414643b08c2280811f795f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1733656855660 2024-12-08T11:20:58,505 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a273155288754b1e8617d7e3bda21e17, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1733656855761 2024-12-08T11:20:58,505 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 415ce065b02f420b9c845b1d99000e01, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733656856386 2024-12-08T11:20:58,511 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) 
file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into 8ea1da108578496e8e264d31f4d827f6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:58,511 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:58,511 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=13, startTime=1733656858438; duration=0sec 2024-12-08T11:20:58,512 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:58,512 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:20:58,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742093_1269 (size=12001) 2024-12-08T11:20:58,523 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#222 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:58,523 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/87fdbc49f546482884d3813dc4fca7ed is 50, key is test_row_0/C:col10/1733656857504/Put/seqid=0 2024-12-08T11:20:58,524 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/2a9b541f4bb14b06b7d8ceaf4fdeed45 2024-12-08T11:20:58,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742094_1270 (size=12104) 2024-12-08T11:20:58,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/cd106c59d8914b58aedac67496162e4e is 50, key is test_row_0/B:col10/1733656857523/Put/seqid=0 2024-12-08T11:20:58,536 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/87fdbc49f546482884d3813dc4fca7ed as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/87fdbc49f546482884d3813dc4fca7ed 2024-12-08T11:20:58,539 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742095_1271 (size=12001) 2024-12-08T11:20:58,540 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/cd106c59d8914b58aedac67496162e4e 2024-12-08T11:20:58,542 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into 87fdbc49f546482884d3813dc4fca7ed(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:58,542 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:58,542 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=13, startTime=1733656858439; duration=0sec 2024-12-08T11:20:58,542 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:58,542 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:20:58,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/56b8f7899c5d43f4b3c764d036b01430 is 50, key is test_row_0/C:col10/1733656857523/Put/seqid=0 2024-12-08T11:20:58,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742096_1272 (size=12001) 2024-12-08T11:20:58,591 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/56b8f7899c5d43f4b3c764d036b01430 2024-12-08T11:20:58,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/2a9b541f4bb14b06b7d8ceaf4fdeed45 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2a9b541f4bb14b06b7d8ceaf4fdeed45 2024-12-08T11:20:58,605 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2a9b541f4bb14b06b7d8ceaf4fdeed45, entries=150, sequenceid=73, filesize=11.7 K 2024-12-08T11:20:58,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/cd106c59d8914b58aedac67496162e4e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/cd106c59d8914b58aedac67496162e4e 2024-12-08T11:20:58,612 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/cd106c59d8914b58aedac67496162e4e, entries=150, sequenceid=73, filesize=11.7 K 2024-12-08T11:20:58,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/56b8f7899c5d43f4b3c764d036b01430 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/56b8f7899c5d43f4b3c764d036b01430 2024-12-08T11:20:58,618 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/56b8f7899c5d43f4b3c764d036b01430, entries=150, sequenceid=73, filesize=11.7 K 2024-12-08T11:20:58,619 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for eada905a64e5510a34d2159c0d8947a2 in 159ms, sequenceid=73, compaction requested=false 2024-12-08T11:20:58,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:58,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:20:58,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-08T11:20:58,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-08T11:20:58,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-08T11:20:58,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 773 msec 2024-12-08T11:20:58,625 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 780 msec 2024-12-08T11:20:58,645 DEBUG [master/355ef6e50110:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-08T11:20:58,651 DEBUG [master/355ef6e50110:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 9ba0a86166fe12d211f42156054bda9c changed from -1.0 to 0.0, refreshing cache 2024-12-08T11:20:58,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:58,652 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:20:58,652 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:20:58,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:58,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:20:58,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:58,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:20:58,653 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:58,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/0f03918ea57146fa9a121f14e3a713c8 is 50, key is test_row_0/A:col10/1733656858650/Put/seqid=0 2024-12-08T11:20:58,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742097_1273 (size=12001) 2024-12-08T11:20:58,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656918692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656918693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656918694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656918695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656918695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656918798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656918798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656918799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656918799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656918800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:58,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-08T11:20:58,948 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-08T11:20:58,949 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:20:58,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-08T11:20:58,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T11:20:58,954 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:20:58,955 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:20:58,955 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:20:59,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656919002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656919002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656919003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656919002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656919004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T11:20:59,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/0f03918ea57146fa9a121f14e3a713c8 2024-12-08T11:20:59,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/4c93fcc9833e4f5baa9f6c7be9f19221 is 50, key is test_row_0/B:col10/1733656858650/Put/seqid=0 2024-12-08T11:20:59,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742098_1274 (size=12001) 2024-12-08T11:20:59,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/4c93fcc9833e4f5baa9f6c7be9f19221 2024-12-08T11:20:59,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/329f3807e00f4051ae458dadb636f962 is 50, key is test_row_0/C:col10/1733656858650/Put/seqid=0 2024-12-08T11:20:59,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742099_1275 (size=12001) 2024-12-08T11:20:59,107 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,108 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-08T11:20:59,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:20:59,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:20:59,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:59,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:20:59,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:59,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:20:59,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/329f3807e00f4051ae458dadb636f962 2024-12-08T11:20:59,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/0f03918ea57146fa9a121f14e3a713c8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0f03918ea57146fa9a121f14e3a713c8 2024-12-08T11:20:59,127 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0f03918ea57146fa9a121f14e3a713c8, entries=150, sequenceid=87, filesize=11.7 K 2024-12-08T11:20:59,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/4c93fcc9833e4f5baa9f6c7be9f19221 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/4c93fcc9833e4f5baa9f6c7be9f19221 2024-12-08T11:20:59,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/4c93fcc9833e4f5baa9f6c7be9f19221, entries=150, sequenceid=87, filesize=11.7 K 2024-12-08T11:20:59,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/329f3807e00f4051ae458dadb636f962 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/329f3807e00f4051ae458dadb636f962 2024-12-08T11:20:59,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/329f3807e00f4051ae458dadb636f962, entries=150, sequenceid=87, filesize=11.7 K 2024-12-08T11:20:59,137 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for eada905a64e5510a34d2159c0d8947a2 in 485ms, sequenceid=87, compaction requested=true 2024-12-08T11:20:59,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:59,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:20:59,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:59,138 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:59,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:20:59,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:59,138 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:59,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:20:59,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:59,139 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:59,139 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:59,139 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:20:59,139 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:20:59,139 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:59,139 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:20:59,139 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/632dd9338341439ba4fac1596a72e0c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/cd106c59d8914b58aedac67496162e4e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/4c93fcc9833e4f5baa9f6c7be9f19221] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=35.3 K 2024-12-08T11:20:59,139 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8ea1da108578496e8e264d31f4d827f6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2a9b541f4bb14b06b7d8ceaf4fdeed45, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0f03918ea57146fa9a121f14e3a713c8] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=35.3 K 2024-12-08T11:20:59,140 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ea1da108578496e8e264d31f4d827f6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733656856386 2024-12-08T11:20:59,140 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a9b541f4bb14b06b7d8ceaf4fdeed45, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733656857522 2024-12-08T11:20:59,140 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 632dd9338341439ba4fac1596a72e0c8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733656856386 2024-12-08T11:20:59,140 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f03918ea57146fa9a121f14e3a713c8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733656858645 2024-12-08T11:20:59,141 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting cd106c59d8914b58aedac67496162e4e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733656857522 2024-12-08T11:20:59,141 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c93fcc9833e4f5baa9f6c7be9f19221, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733656858645 2024-12-08T11:20:59,149 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:59,150 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/b6c0f6f49b904893988de6b8deb25744 is 50, key is test_row_0/A:col10/1733656858650/Put/seqid=0 2024-12-08T11:20:59,152 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#229 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:59,152 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/20287007003c40f38ef0c48d45d1ac8f is 50, key is test_row_0/B:col10/1733656858650/Put/seqid=0 2024-12-08T11:20:59,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742100_1276 (size=12207) 2024-12-08T11:20:59,175 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/b6c0f6f49b904893988de6b8deb25744 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b6c0f6f49b904893988de6b8deb25744 2024-12-08T11:20:59,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742101_1277 (size=12207) 2024-12-08T11:20:59,182 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into b6c0f6f49b904893988de6b8deb25744(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:59,182 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:59,182 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=13, startTime=1733656859137; duration=0sec 2024-12-08T11:20:59,182 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:20:59,182 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:20:59,182 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:20:59,183 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:20:59,183 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:20:59,183 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:20:59,183 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/87fdbc49f546482884d3813dc4fca7ed, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/56b8f7899c5d43f4b3c764d036b01430, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/329f3807e00f4051ae458dadb636f962] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=35.3 K 2024-12-08T11:20:59,184 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87fdbc49f546482884d3813dc4fca7ed, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733656856386 2024-12-08T11:20:59,184 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56b8f7899c5d43f4b3c764d036b01430, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1733656857522 2024-12-08T11:20:59,184 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 329f3807e00f4051ae458dadb636f962, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733656858645 2024-12-08T11:20:59,192 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#230 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:20:59,192 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/bb77684d12ca4f38839ad1fbec5b452b is 50, key is test_row_0/C:col10/1733656858650/Put/seqid=0 2024-12-08T11:20:59,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742102_1278 (size=12207) 2024-12-08T11:20:59,201 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/bb77684d12ca4f38839ad1fbec5b452b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bb77684d12ca4f38839ad1fbec5b452b 2024-12-08T11:20:59,209 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into bb77684d12ca4f38839ad1fbec5b452b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:20:59,209 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:59,209 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=13, startTime=1733656859138; duration=0sec 2024-12-08T11:20:59,209 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:59,209 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:20:59,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T11:20:59,263 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,264 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-08T11:20:59,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:20:59,264 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:20:59,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:20:59,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:59,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:20:59,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:59,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:20:59,265 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:20:59,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/387ffd1f8a474f948c7a4def6ef89bdc is 50, key is test_row_0/A:col10/1733656858693/Put/seqid=0 2024-12-08T11:20:59,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742103_1279 (size=12001) 2024-12-08T11:20:59,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:20:59,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:20:59,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656919314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656919314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656919314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656919316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656919316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656919417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656919419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656919419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656919419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656919420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T11:20:59,586 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/20287007003c40f38ef0c48d45d1ac8f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/20287007003c40f38ef0c48d45d1ac8f 2024-12-08T11:20:59,591 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into 20287007003c40f38ef0c48d45d1ac8f(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:20:59,592 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:20:59,592 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=13, startTime=1733656859138; duration=0sec 2024-12-08T11:20:59,592 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:20:59,592 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:20:59,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656919619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656919622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656919623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656919624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656919624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,679 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/387ffd1f8a474f948c7a4def6ef89bdc 2024-12-08T11:20:59,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/5d93b9f2438e41f9b06920d79b6a8b84 is 50, key is test_row_0/B:col10/1733656858693/Put/seqid=0 2024-12-08T11:20:59,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742104_1280 (size=12001) 2024-12-08T11:20:59,923 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656919921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656919925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656919926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656919926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:20:59,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:20:59,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656919927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T11:21:00,095 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/5d93b9f2438e41f9b06920d79b6a8b84 2024-12-08T11:21:00,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/c1293455a7c54f06880deb42c1748bbb is 50, key is test_row_0/C:col10/1733656858693/Put/seqid=0 2024-12-08T11:21:00,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742105_1281 (size=12001) 2024-12-08T11:21:00,110 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/c1293455a7c54f06880deb42c1748bbb 2024-12-08T11:21:00,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/387ffd1f8a474f948c7a4def6ef89bdc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/387ffd1f8a474f948c7a4def6ef89bdc 2024-12-08T11:21:00,119 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/387ffd1f8a474f948c7a4def6ef89bdc, entries=150, sequenceid=114, filesize=11.7 K 2024-12-08T11:21:00,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/5d93b9f2438e41f9b06920d79b6a8b84 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/5d93b9f2438e41f9b06920d79b6a8b84 2024-12-08T11:21:00,124 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/5d93b9f2438e41f9b06920d79b6a8b84, entries=150, sequenceid=114, filesize=11.7 K 2024-12-08T11:21:00,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/c1293455a7c54f06880deb42c1748bbb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c1293455a7c54f06880deb42c1748bbb 2024-12-08T11:21:00,129 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c1293455a7c54f06880deb42c1748bbb, entries=150, sequenceid=114, filesize=11.7 K 2024-12-08T11:21:00,130 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for eada905a64e5510a34d2159c0d8947a2 in 866ms, sequenceid=114, compaction requested=false 2024-12-08T11:21:00,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:00,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:00,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-08T11:21:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-08T11:21:00,134 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-08T11:21:00,134 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1770 sec 2024-12-08T11:21:00,135 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.1840 sec 2024-12-08T11:21:00,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:00,429 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T11:21:00,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:00,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:00,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:00,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:00,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:00,431 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:00,436 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/fe302cdf557446eca0b02a9c726bcf50 is 50, key is test_row_0/A:col10/1733656860428/Put/seqid=0 2024-12-08T11:21:00,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742106_1282 (size=12051) 2024-12-08T11:21:00,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/fe302cdf557446eca0b02a9c726bcf50 2024-12-08T11:21:00,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,450 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656920447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656920447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656920448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/c986c4ff224f4b6383fa35526e6d874c is 50, key is test_row_0/B:col10/1733656860428/Put/seqid=0 2024-12-08T11:21:00,453 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656920450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,454 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656920452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742107_1283 (size=12051) 2024-12-08T11:21:00,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656920551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656920551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,554 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656920551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656920555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656920559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656920754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,756 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656920755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656920763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656920763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:00,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656920763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:00,857 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/c986c4ff224f4b6383fa35526e6d874c 2024-12-08T11:21:00,864 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/c0908b11886e42a78b86575337928136 is 50, key is test_row_0/C:col10/1733656860428/Put/seqid=0 2024-12-08T11:21:00,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742108_1284 (size=12051) 2024-12-08T11:21:00,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/c0908b11886e42a78b86575337928136 2024-12-08T11:21:00,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/fe302cdf557446eca0b02a9c726bcf50 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/fe302cdf557446eca0b02a9c726bcf50 2024-12-08T11:21:00,897 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/fe302cdf557446eca0b02a9c726bcf50, entries=150, sequenceid=130, filesize=11.8 K 2024-12-08T11:21:00,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/c986c4ff224f4b6383fa35526e6d874c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c986c4ff224f4b6383fa35526e6d874c 2024-12-08T11:21:00,908 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c986c4ff224f4b6383fa35526e6d874c, entries=150, sequenceid=130, filesize=11.8 K 2024-12-08T11:21:00,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/c0908b11886e42a78b86575337928136 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c0908b11886e42a78b86575337928136 2024-12-08T11:21:00,916 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c0908b11886e42a78b86575337928136, entries=150, sequenceid=130, filesize=11.8 K 2024-12-08T11:21:00,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for eada905a64e5510a34d2159c0d8947a2 in 488ms, sequenceid=130, compaction requested=true 2024-12-08T11:21:00,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:00,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:00,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:00,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:00,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:00,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:00,918 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:00,918 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:00,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:00,919 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:00,919 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 
eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:21:00,919 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:00,919 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b6c0f6f49b904893988de6b8deb25744, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/387ffd1f8a474f948c7a4def6ef89bdc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/fe302cdf557446eca0b02a9c726bcf50] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=35.4 K 2024-12-08T11:21:00,919 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:00,919 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:21:00,920 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:00,920 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/20287007003c40f38ef0c48d45d1ac8f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/5d93b9f2438e41f9b06920d79b6a8b84, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c986c4ff224f4b6383fa35526e6d874c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=35.4 K 2024-12-08T11:21:00,920 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 20287007003c40f38ef0c48d45d1ac8f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733656858645 2024-12-08T11:21:00,920 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6c0f6f49b904893988de6b8deb25744, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733656858645 2024-12-08T11:21:00,921 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 387ffd1f8a474f948c7a4def6ef89bdc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733656858675 2024-12-08T11:21:00,921 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d93b9f2438e41f9b06920d79b6a8b84, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733656858675 2024-12-08T11:21:00,921 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c986c4ff224f4b6383fa35526e6d874c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733656859312 2024-12-08T11:21:00,921 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe302cdf557446eca0b02a9c726bcf50, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733656859312 2024-12-08T11:21:00,930 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#237 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:00,930 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2a72c8cea8af4372888382677cba2b77 is 50, key is test_row_0/B:col10/1733656860428/Put/seqid=0 2024-12-08T11:21:00,938 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#238 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:00,938 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/051f22f19d6243b0a53fd07e016734d5 is 50, key is test_row_0/A:col10/1733656860428/Put/seqid=0 2024-12-08T11:21:00,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742109_1285 (size=12359) 2024-12-08T11:21:00,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742110_1286 (size=12359) 2024-12-08T11:21:01,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-08T11:21:01,055 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-08T11:21:01,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:01,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-08T11:21:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:01,059 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T11:21:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T11:21:01,060 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:01,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:01,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:01,060 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:01,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:01,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:01,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:01,060 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:01,060 INFO [PEWorker-2 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:01,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/8f0c97a82f0c4aad9112f1a1934981e8 is 50, key is test_row_0/A:col10/1733656860449/Put/seqid=0 2024-12-08T11:21:01,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742111_1287 (size=14541) 2024-12-08T11:21:01,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656921071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656921072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656921073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656921074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656921074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T11:21:01,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656921175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656921175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656921177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656921177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656921177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,214 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T11:21:01,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:01,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,215 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:01,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T11:21:01,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,372 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2a72c8cea8af4372888382677cba2b77 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2a72c8cea8af4372888382677cba2b77 2024-12-08T11:21:01,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T11:21:01,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:01,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,376 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:01,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,378 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into 2a72c8cea8af4372888382677cba2b77(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:01,378 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:01,379 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=13, startTime=1733656860918; duration=0sec 2024-12-08T11:21:01,379 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:01,379 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:21:01,379 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:01,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656921380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656921380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656921380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656921380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,381 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656921380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,382 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:01,382 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:21:01,382 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:01,382 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bb77684d12ca4f38839ad1fbec5b452b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c1293455a7c54f06880deb42c1748bbb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c0908b11886e42a78b86575337928136] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=35.4 K 2024-12-08T11:21:01,383 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting bb77684d12ca4f38839ad1fbec5b452b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1733656858645 2024-12-08T11:21:01,383 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c1293455a7c54f06880deb42c1748bbb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1733656858675 2024-12-08T11:21:01,384 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c0908b11886e42a78b86575337928136, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733656859312 2024-12-08T11:21:01,387 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/051f22f19d6243b0a53fd07e016734d5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/051f22f19d6243b0a53fd07e016734d5 2024-12-08T11:21:01,393 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into 051f22f19d6243b0a53fd07e016734d5(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:01,393 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:01,393 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=13, startTime=1733656860917; duration=0sec 2024-12-08T11:21:01,393 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:01,393 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:21:01,411 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#240 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:01,412 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/fe1be07d01b1437a8ce5426370c4861b is 50, key is test_row_0/C:col10/1733656860428/Put/seqid=0 2024-12-08T11:21:01,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742112_1288 (size=12359) 2024-12-08T11:21:01,472 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/8f0c97a82f0c4aad9112f1a1934981e8 2024-12-08T11:21:01,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/1a599df37c1f47d2938d3229afbb3f49 is 50, key is test_row_0/B:col10/1733656860449/Put/seqid=0 2024-12-08T11:21:01,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742113_1289 (size=12151) 2024-12-08T11:21:01,529 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,529 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T11:21:01,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
as already flushing 2024-12-08T11:21:01,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T11:21:01,682 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,683 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T11:21:01,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:01,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656921681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656921682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656921682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:01,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656921684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:01,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656921685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,836 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T11:21:01,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:01,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:01,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,841 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/fe1be07d01b1437a8ce5426370c4861b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/fe1be07d01b1437a8ce5426370c4861b 2024-12-08T11:21:01,846 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into fe1be07d01b1437a8ce5426370c4861b(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:01,846 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:01,846 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=13, startTime=1733656860918; duration=0sec 2024-12-08T11:21:01,846 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:01,846 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:21:01,892 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/1a599df37c1f47d2938d3229afbb3f49 2024-12-08T11:21:01,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/406753c5020742288b1c674d510fb042 is 50, key is test_row_0/C:col10/1733656860449/Put/seqid=0 2024-12-08T11:21:01,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742114_1290 (size=12151) 2024-12-08T11:21:01,993 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:01,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T11:21:01,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:01,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:01,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:01,994 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:01,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:01,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:02,148 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:02,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T11:21:02,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:02,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
as already flushing 2024-12-08T11:21:02,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:02,148 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:02,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:02,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:02,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T11:21:02,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:02,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656922184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:02,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:02,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656922185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:02,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:02,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656922187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:02,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:02,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656922187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:02,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:02,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656922191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:02,301 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:02,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T11:21:02,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:02,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:02,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:02,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:02,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:02,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:02,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/406753c5020742288b1c674d510fb042 2024-12-08T11:21:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/8f0c97a82f0c4aad9112f1a1934981e8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8f0c97a82f0c4aad9112f1a1934981e8 2024-12-08T11:21:02,324 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8f0c97a82f0c4aad9112f1a1934981e8, entries=200, sequenceid=154, filesize=14.2 K 2024-12-08T11:21:02,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/1a599df37c1f47d2938d3229afbb3f49 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/1a599df37c1f47d2938d3229afbb3f49 2024-12-08T11:21:02,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/1a599df37c1f47d2938d3229afbb3f49, entries=150, 
sequenceid=154, filesize=11.9 K 2024-12-08T11:21:02,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/406753c5020742288b1c674d510fb042 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/406753c5020742288b1c674d510fb042 2024-12-08T11:21:02,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/406753c5020742288b1c674d510fb042, entries=150, sequenceid=154, filesize=11.9 K 2024-12-08T11:21:02,336 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for eada905a64e5510a34d2159c0d8947a2 in 1277ms, sequenceid=154, compaction requested=false 2024-12-08T11:21:02,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:02,455 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:02,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-08T11:21:02,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
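The interleaved RegionTooBusyException warnings ("Over memstore limit=512.0 K") mean writes are being rejected in HRegion.checkResources while the region's memstore is above its blocking threshold, which is the configured memstore flush size multiplied by the block multiplier. The sketch below shows the two configuration keys involved; the concrete values are illustrative assumptions chosen so their product matches the 512 K limit seen here, not the test's actual settings:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only: 128 KB flush size x multiplier 4 = 512 KB,
    // the point at which checkResources() starts throwing RegionTooBusyException.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes");
  }
}
```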
2024-12-08T11:21:02,456 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T11:21:02,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:02,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:02,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:02,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:02,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:02,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:02,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/03581e7f97b34f02adc4f5bd5069989e is 50, key is test_row_0/A:col10/1733656861073/Put/seqid=0 2024-12-08T11:21:02,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742115_1291 (size=12151) 2024-12-08T11:21:02,495 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/03581e7f97b34f02adc4f5bd5069989e 2024-12-08T11:21:02,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2055b88e90534c0fb79ec2580341337a is 50, key is test_row_0/B:col10/1733656861073/Put/seqid=0 2024-12-08T11:21:02,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742116_1292 (size=12151) 2024-12-08T11:21:02,918 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=170 (bloomFilter=true), 
to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2055b88e90534c0fb79ec2580341337a 2024-12-08T11:21:02,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/f431f0f5f1a247fb9567dd32d3a84c4b is 50, key is test_row_0/C:col10/1733656861073/Put/seqid=0 2024-12-08T11:21:02,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742117_1293 (size=12151) 2024-12-08T11:21:03,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T11:21:03,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:03,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:03,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656923206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656923206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656923207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656923208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656923209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656923310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656923312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656923312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656923312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656923320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,330 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/f431f0f5f1a247fb9567dd32d3a84c4b 2024-12-08T11:21:03,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/03581e7f97b34f02adc4f5bd5069989e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/03581e7f97b34f02adc4f5bd5069989e 2024-12-08T11:21:03,339 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/03581e7f97b34f02adc4f5bd5069989e, entries=150, sequenceid=170, filesize=11.9 K 2024-12-08T11:21:03,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2055b88e90534c0fb79ec2580341337a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2055b88e90534c0fb79ec2580341337a 2024-12-08T11:21:03,345 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2055b88e90534c0fb79ec2580341337a, entries=150, sequenceid=170, filesize=11.9 K 2024-12-08T11:21:03,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/f431f0f5f1a247fb9567dd32d3a84c4b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f431f0f5f1a247fb9567dd32d3a84c4b 2024-12-08T11:21:03,350 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f431f0f5f1a247fb9567dd32d3a84c4b, entries=150, sequenceid=170, filesize=11.9 K 2024-12-08T11:21:03,351 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for eada905a64e5510a34d2159c0d8947a2 in 895ms, sequenceid=170, compaction requested=true 2024-12-08T11:21:03,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:03,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
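Each RegionTooBusyException is also surfaced to the writing client (the CallRunner lines show the rejected Mutate calls), where it is a retryable error: the standard client retries it automatically, paced by hbase.client.pause and bounded by hbase.client.retries.number. A minimal explicit retry loop, assuming the standard HBase client API rather than anything from this test, could look like:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;    // give up after a few attempts
          Thread.sleep(100L * attempt); // back off while the flush catches up
        }
      }
    }
  }
}
```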
2024-12-08T11:21:03,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-08T11:21:03,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-08T11:21:03,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-08T11:21:03,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2920 sec 2024-12-08T11:21:03,355 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 2.2970 sec 2024-12-08T11:21:03,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:03,514 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T11:21:03,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:03,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:03,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:03,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:03,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:03,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:03,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/1d6a09c6c6eb49cf95e08b0dacee6daa is 50, key is test_row_0/A:col10/1733656863513/Put/seqid=0 2024-12-08T11:21:03,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656923523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656923524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,527 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656923526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742118_1294 (size=12151) 2024-12-08T11:21:03,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656923527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656923537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656923628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656923628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656923629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656923637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656923638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,834 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656923832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656923833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656923833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656923840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:03,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656923843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:03,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/1d6a09c6c6eb49cf95e08b0dacee6daa 2024-12-08T11:21:03,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/ab8d3fd586e4480ea95262be52d01b67 is 50, key is test_row_0/B:col10/1733656863513/Put/seqid=0 2024-12-08T11:21:03,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742119_1295 (size=12151) 2024-12-08T11:21:03,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/ab8d3fd586e4480ea95262be52d01b67 2024-12-08T11:21:03,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/edf31cdc982d4cdaa1d77845ade33a3e is 50, key is test_row_0/C:col10/1733656863513/Put/seqid=0 2024-12-08T11:21:03,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742120_1296 (size=12151) 2024-12-08T11:21:03,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/edf31cdc982d4cdaa1d77845ade33a3e 2024-12-08T11:21:03,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/1d6a09c6c6eb49cf95e08b0dacee6daa as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1d6a09c6c6eb49cf95e08b0dacee6daa 2024-12-08T11:21:03,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1d6a09c6c6eb49cf95e08b0dacee6daa, entries=150, sequenceid=194, filesize=11.9 K 2024-12-08T11:21:03,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/ab8d3fd586e4480ea95262be52d01b67 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ab8d3fd586e4480ea95262be52d01b67 2024-12-08T11:21:04,005 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ab8d3fd586e4480ea95262be52d01b67, entries=150, sequenceid=194, filesize=11.9 K 2024-12-08T11:21:04,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/edf31cdc982d4cdaa1d77845ade33a3e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/edf31cdc982d4cdaa1d77845ade33a3e 2024-12-08T11:21:04,012 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/edf31cdc982d4cdaa1d77845ade33a3e, entries=150, sequenceid=194, filesize=11.9 K 2024-12-08T11:21:04,013 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for eada905a64e5510a34d2159c0d8947a2 in 499ms, sequenceid=194, compaction requested=true 2024-12-08T11:21:04,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:04,013 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:04,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:04,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:04,014 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:04,014 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction 
store size is 2 2024-12-08T11:21:04,015 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:04,015 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:21:04,015 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:04,015 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2a72c8cea8af4372888382677cba2b77, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/1a599df37c1f47d2938d3229afbb3f49, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2055b88e90534c0fb79ec2580341337a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ab8d3fd586e4480ea95262be52d01b67] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=47.7 K 2024-12-08T11:21:04,016 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51202 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:04,016 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:21:04,016 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
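Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects a write once the region's memstore passes its blocking limit, which is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the 512.0 K limit reported here suggests the test configures a much smaller flush size than the 128 MB production default. A minimal sketch of computing that limit from a client-side Configuration (class name and printed message are illustrative, not part of the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: computes the per-region memstore blocking limit roughly the way
// HRegion derives its blocking threshold (flush size times block multiplier).
public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // hbase.hregion.memstore.flush.size: per-region flush threshold in bytes (default 128 MB).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // hbase.hregion.memstore.block.multiplier: multiples of the flush size a memstore may
    // reach before new updates are blocked (default 4).
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingLimit = flushSize * blockMultiplier;
    System.out.println("Updates are blocked above " + blockingLimit + " bytes per region");
  }
}
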
2024-12-08T11:21:04,016 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/051f22f19d6243b0a53fd07e016734d5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8f0c97a82f0c4aad9112f1a1934981e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/03581e7f97b34f02adc4f5bd5069989e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1d6a09c6c6eb49cf95e08b0dacee6daa] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=50.0 K 2024-12-08T11:21:04,016 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 051f22f19d6243b0a53fd07e016734d5, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733656859312 2024-12-08T11:21:04,016 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a72c8cea8af4372888382677cba2b77, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733656859312 2024-12-08T11:21:04,017 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 1a599df37c1f47d2938d3229afbb3f49, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733656860449 2024-12-08T11:21:04,017 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f0c97a82f0c4aad9112f1a1934981e8, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733656860445 2024-12-08T11:21:04,017 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2055b88e90534c0fb79ec2580341337a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733656861071 2024-12-08T11:21:04,018 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03581e7f97b34f02adc4f5bd5069989e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733656861071 2024-12-08T11:21:04,018 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ab8d3fd586e4480ea95262be52d01b67, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656863205 2024-12-08T11:21:04,018 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d6a09c6c6eb49cf95e08b0dacee6daa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656863205 2024-12-08T11:21:04,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:04,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, 
priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:04,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:04,030 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#249 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:04,031 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/9a868baabb5a4542a2cb9c410bec1f8e is 50, key is test_row_0/B:col10/1733656863513/Put/seqid=0 2024-12-08T11:21:04,034 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#250 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:04,034 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/d55461b4353346768beed6a22f4b283b is 50, key is test_row_0/A:col10/1733656863513/Put/seqid=0 2024-12-08T11:21:04,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742121_1297 (size=12595) 2024-12-08T11:21:04,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742122_1298 (size=12595) 2024-12-08T11:21:04,051 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/d55461b4353346768beed6a22f4b283b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/d55461b4353346768beed6a22f4b283b 2024-12-08T11:21:04,058 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into d55461b4353346768beed6a22f4b283b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
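While the flush and compactions above drain the memstore, the writers keep issuing Mutate calls (note the incrementing callIds on the same connections), which is why further RegionTooBusyException entries appear below until capacity frees up. A minimal client-side sketch of such a bounded backoff-and-retry loop, assuming the exception surfaces to the caller (the stock HBase client normally absorbs and retries it internally) and using an illustrative value for the cell:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative writer: retries a single Put with exponential backoff when the region
// reports it is over its memstore blocking limit.
public class BusyRegionRetrySketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier match the cells seen in this log; the value is made up.
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);            // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);   // wait for the flush/compaction to free capacity
          backoffMs *= 2;            // simple exponential backoff
        }
      }
    }
  }
}
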
2024-12-08T11:21:04,058 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:04,058 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=12, startTime=1733656864013; duration=0sec 2024-12-08T11:21:04,058 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:04,058 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:21:04,058 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:04,059 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:04,059 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:21:04,060 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:04,060 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/fe1be07d01b1437a8ce5426370c4861b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/406753c5020742288b1c674d510fb042, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f431f0f5f1a247fb9567dd32d3a84c4b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/edf31cdc982d4cdaa1d77845ade33a3e] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=47.7 K 2024-12-08T11:21:04,060 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe1be07d01b1437a8ce5426370c4861b, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733656859312 2024-12-08T11:21:04,060 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 406753c5020742288b1c674d510fb042, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733656860449 2024-12-08T11:21:04,061 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting f431f0f5f1a247fb9567dd32d3a84c4b, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733656861071 2024-12-08T11:21:04,062 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting edf31cdc982d4cdaa1d77845ade33a3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656863205 2024-12-08T11:21:04,073 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#251 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:04,074 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2ae055deeeb94ffb848805da5d8b852f is 50, key is test_row_0/C:col10/1733656863513/Put/seqid=0 2024-12-08T11:21:04,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742123_1299 (size=12595) 2024-12-08T11:21:04,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:04,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T11:21:04,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:04,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:04,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:04,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:04,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:04,174 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:04,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/b362ea4ffe854029beae9c054e6baac2 is 50, key is test_row_0/A:col10/1733656864139/Put/seqid=0 2024-12-08T11:21:04,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742124_1300 (size=12151) 2024-12-08T11:21:04,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/b362ea4ffe854029beae9c054e6baac2 2024-12-08T11:21:04,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656924191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/ff23fc560b6d46bab07d9e2e12f34704 is 50, key is test_row_0/B:col10/1733656864139/Put/seqid=0 2024-12-08T11:21:04,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656924194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656924194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656924194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656924196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742125_1301 (size=12151) 2024-12-08T11:21:04,297 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656924296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656924299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656924299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,304 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656924301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656924301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,447 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/9a868baabb5a4542a2cb9c410bec1f8e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a868baabb5a4542a2cb9c410bec1f8e 2024-12-08T11:21:04,452 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into 9a868baabb5a4542a2cb9c410bec1f8e(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:04,452 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:04,452 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=12, startTime=1733656864014; duration=0sec 2024-12-08T11:21:04,452 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:04,452 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:21:04,493 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2ae055deeeb94ffb848805da5d8b852f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ae055deeeb94ffb848805da5d8b852f 2024-12-08T11:21:04,497 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into 2ae055deeeb94ffb848805da5d8b852f(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:04,497 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:04,497 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=12, startTime=1733656864020; duration=0sec 2024-12-08T11:21:04,498 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:04,498 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:21:04,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656924500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656924502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656924503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656924505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656924506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/ff23fc560b6d46bab07d9e2e12f34704 2024-12-08T11:21:04,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/f9be1a3290604675bfbc37ae6bd2546d is 50, key is test_row_0/C:col10/1733656864139/Put/seqid=0 2024-12-08T11:21:04,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742126_1302 (size=12151) 2024-12-08T11:21:04,657 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/f9be1a3290604675bfbc37ae6bd2546d 2024-12-08T11:21:04,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/b362ea4ffe854029beae9c054e6baac2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b362ea4ffe854029beae9c054e6baac2 2024-12-08T11:21:04,675 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b362ea4ffe854029beae9c054e6baac2, entries=150, sequenceid=211, filesize=11.9 K 2024-12-08T11:21:04,677 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/ff23fc560b6d46bab07d9e2e12f34704 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ff23fc560b6d46bab07d9e2e12f34704 2024-12-08T11:21:04,681 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ff23fc560b6d46bab07d9e2e12f34704, entries=150, sequenceid=211, filesize=11.9 K 2024-12-08T11:21:04,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/f9be1a3290604675bfbc37ae6bd2546d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f9be1a3290604675bfbc37ae6bd2546d 2024-12-08T11:21:04,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f9be1a3290604675bfbc37ae6bd2546d, entries=150, sequenceid=211, filesize=11.9 K 2024-12-08T11:21:04,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for eada905a64e5510a34d2159c0d8947a2 in 515ms, sequenceid=211, compaction requested=false 2024-12-08T11:21:04,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:04,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:04,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-08T11:21:04,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:04,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:04,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:04,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:04,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:04,807 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:04,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/3634c76c265e4809b63f327ba4830343 is 50, key is test_row_0/A:col10/1733656864805/Put/seqid=0 2024-12-08T11:21:04,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742127_1303 (size=14541) 2024-12-08T11:21:04,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656924817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656924819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656924817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656924821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656924822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,837 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/3634c76c265e4809b63f327ba4830343 2024-12-08T11:21:04,847 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/c9ac20cfd42a440590008eeff4339fca is 50, key is test_row_0/B:col10/1733656864805/Put/seqid=0 2024-12-08T11:21:04,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742128_1304 (size=12151) 2024-12-08T11:21:04,859 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/c9ac20cfd42a440590008eeff4339fca 2024-12-08T11:21:04,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/972dd010928a40dbbeee6ee69b67448f is 50, key is test_row_0/C:col10/1733656864805/Put/seqid=0 2024-12-08T11:21:04,872 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742129_1305 (size=12151) 2024-12-08T11:21:04,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/972dd010928a40dbbeee6ee69b67448f 2024-12-08T11:21:04,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/3634c76c265e4809b63f327ba4830343 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3634c76c265e4809b63f327ba4830343 2024-12-08T11:21:04,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3634c76c265e4809b63f327ba4830343, entries=200, sequenceid=235, filesize=14.2 K 2024-12-08T11:21:04,886 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/c9ac20cfd42a440590008eeff4339fca as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c9ac20cfd42a440590008eeff4339fca 2024-12-08T11:21:04,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c9ac20cfd42a440590008eeff4339fca, entries=150, sequenceid=235, filesize=11.9 K 2024-12-08T11:21:04,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/972dd010928a40dbbeee6ee69b67448f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/972dd010928a40dbbeee6ee69b67448f 2024-12-08T11:21:04,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/972dd010928a40dbbeee6ee69b67448f, entries=150, sequenceid=235, filesize=11.9 K 2024-12-08T11:21:04,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for eada905a64e5510a34d2159c0d8947a2 in 93ms, sequenceid=235, compaction requested=true 2024-12-08T11:21:04,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:04,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:04,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:04,900 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:04,900 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:04,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:04,901 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39287 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:04,901 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:04,901 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:21:04,901 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:21:04,901 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:04,901 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:04,901 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a868baabb5a4542a2cb9c410bec1f8e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ff23fc560b6d46bab07d9e2e12f34704, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c9ac20cfd42a440590008eeff4339fca] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=36.0 K 2024-12-08T11:21:04,901 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/d55461b4353346768beed6a22f4b283b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b362ea4ffe854029beae9c054e6baac2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3634c76c265e4809b63f327ba4830343] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=38.4 K 2024-12-08T11:21:04,902 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting d55461b4353346768beed6a22f4b283b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656863205 2024-12-08T11:21:04,902 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a868baabb5a4542a2cb9c410bec1f8e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656863205 2024-12-08T11:21:04,902 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting b362ea4ffe854029beae9c054e6baac2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733656863526 2024-12-08T11:21:04,902 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ff23fc560b6d46bab07d9e2e12f34704, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733656863526 2024-12-08T11:21:04,903 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3634c76c265e4809b63f327ba4830343, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733656864187 2024-12-08T11:21:04,903 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c9ac20cfd42a440590008eeff4339fca, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733656864192 2024-12-08T11:21:04,914 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#258 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:04,914 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/f5f3e92d5e0645048fca950afa459d8a is 50, key is test_row_0/B:col10/1733656864805/Put/seqid=0 2024-12-08T11:21:04,922 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#259 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:04,922 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/3738ac535aa242d5bd41c82cbef6dbd8 is 50, key is test_row_0/A:col10/1733656864805/Put/seqid=0 2024-12-08T11:21:04,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:04,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T11:21:04,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:04,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:04,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:04,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:04,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:04,926 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:04,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/1c336c78407c4397b700c03714f5acf6 is 50, key is test_row_0/A:col10/1733656864924/Put/seqid=0 2024-12-08T11:21:04,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742130_1306 (size=12697) 2024-12-08T11:21:04,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742131_1307 (size=12697) 2024-12-08T11:21:04,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742132_1308 (size=14541) 2024-12-08T11:21:04,945 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656924939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,948 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/f5f3e92d5e0645048fca950afa459d8a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f5f3e92d5e0645048fca950afa459d8a 2024-12-08T11:21:04,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656924944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656924944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656924946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:04,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656924946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:04,955 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into f5f3e92d5e0645048fca950afa459d8a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:04,955 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:04,955 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=13, startTime=1733656864900; duration=0sec 2024-12-08T11:21:04,955 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:04,955 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:21:04,955 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:04,957 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:04,957 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:21:04,957 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:04,957 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ae055deeeb94ffb848805da5d8b852f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f9be1a3290604675bfbc37ae6bd2546d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/972dd010928a40dbbeee6ee69b67448f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=36.0 K 2024-12-08T11:21:04,958 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ae055deeeb94ffb848805da5d8b852f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656863205 2024-12-08T11:21:04,958 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f9be1a3290604675bfbc37ae6bd2546d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1733656863526 2024-12-08T11:21:04,958 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 972dd010928a40dbbeee6ee69b67448f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733656864192 2024-12-08T11:21:04,975 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#261 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:04,976 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/6e05fb556cf14877b733186360c3e4b4 is 50, key is test_row_0/C:col10/1733656864805/Put/seqid=0 2024-12-08T11:21:04,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742133_1309 (size=12697) 2024-12-08T11:21:05,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656925047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656925050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656925050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656925050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656925050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-08T11:21:05,164 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-08T11:21:05,166 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:05,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-08T11:21:05,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T11:21:05,168 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:05,168 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:05,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:05,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656925250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656925253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656925254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656925255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656925255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T11:21:05,320 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T11:21:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/1c336c78407c4397b700c03714f5acf6 2024-12-08T11:21:05,348 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/3738ac535aa242d5bd41c82cbef6dbd8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3738ac535aa242d5bd41c82cbef6dbd8 2024-12-08T11:21:05,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/b7ceb633cdbc46caa950409484350d71 is 50, key is test_row_0/B:col10/1733656864924/Put/seqid=0 2024-12-08T11:21:05,354 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into 3738ac535aa242d5bd41c82cbef6dbd8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:05,354 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:05,354 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=13, startTime=1733656864899; duration=0sec 2024-12-08T11:21:05,354 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:05,354 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:21:05,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742134_1310 (size=12151) 2024-12-08T11:21:05,390 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/6e05fb556cf14877b733186360c3e4b4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/6e05fb556cf14877b733186360c3e4b4 2024-12-08T11:21:05,396 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into 6e05fb556cf14877b733186360c3e4b4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:05,396 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:05,396 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=13, startTime=1733656864900; duration=0sec 2024-12-08T11:21:05,396 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:05,396 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:21:05,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T11:21:05,473 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T11:21:05,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:05,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:05,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656925556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656925557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656925560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656925560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656925560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,626 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T11:21:05,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:05,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:05,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,757 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/b7ceb633cdbc46caa950409484350d71 2024-12-08T11:21:05,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/7413f38c72f147ae9c985f729c41b7c3 is 50, key is test_row_0/C:col10/1733656864924/Put/seqid=0 2024-12-08T11:21:05,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T11:21:05,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742135_1311 (size=12151) 2024-12-08T11:21:05,779 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T11:21:05,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:05,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,780 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,935 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:05,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T11:21:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:05,936 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:05,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:06,061 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:06,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656926059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:06,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656926062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:06,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:06,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656926062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:06,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656926063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:06,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:06,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656926067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:06,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:06,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-08T11:21:06,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
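The interleaved ipc.CallRunner records show client Mutate RPCs repeatedly bouncing off the busy region while the flush is pending. The stock HBase client already treats RegionTooBusyException as retryable (governed by hbase.client.retries.number and hbase.client.pause), so whether application code ever sees the exception at all depends on those settings; the loop below is only an illustrative sketch of backing off explicitly, assuming a reachable cluster and the TestAcidGuarantees table from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoff {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

      for (int attempt = 1; ; attempt++) {
        try {
          // May be rejected while the region's memstore is over its blocking limit;
          // normally the client library retries this internally before giving up.
          table.put(put);
          break;
        } catch (RegionTooBusyException busy) {
          if (attempt >= 5) {
            throw busy;                   // give up after a handful of attempts
          }
          Thread.sleep(200L * attempt);   // simple linear backoff before retrying
        }
      }
    }
  }
}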
2024-12-08T11:21:06,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:06,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:06,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:06,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:06,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
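pid=80 is a flush subprocedure dispatched by the FlushTableProcedure running as pid=79 (the "Checking to see if procedure is done pid=79" polls above). Because the region reports "NOT flushing ... as already flushing", each dispatch fails and the master keeps re-sending the FlushRegionCallable until the in-flight flush completes. From client code such a table flush is normally requested through the Admin API; a minimal sketch, assuming a reachable cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on this branch the request
      // is driven by a FlushTableProcedure like pid=79 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}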
2024-12-08T11:21:06,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/7413f38c72f147ae9c985f729c41b7c3 2024-12-08T11:21:06,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/1c336c78407c4397b700c03714f5acf6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1c336c78407c4397b700c03714f5acf6 2024-12-08T11:21:06,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1c336c78407c4397b700c03714f5acf6, entries=200, sequenceid=251, filesize=14.2 K 2024-12-08T11:21:06,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/b7ceb633cdbc46caa950409484350d71 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/b7ceb633cdbc46caa950409484350d71 2024-12-08T11:21:06,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/b7ceb633cdbc46caa950409484350d71, entries=150, sequenceid=251, filesize=11.9 K 2024-12-08T11:21:06,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/7413f38c72f147ae9c985f729c41b7c3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/7413f38c72f147ae9c985f729c41b7c3 2024-12-08T11:21:06,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/7413f38c72f147ae9c985f729c41b7c3, entries=150, sequenceid=251, filesize=11.9 K 2024-12-08T11:21:06,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for eada905a64e5510a34d2159c0d8947a2 in 1266ms, sequenceid=251, compaction requested=false 2024-12-08T11:21:06,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:06,242 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:06,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=80 2024-12-08T11:21:06,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:06,243 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:21:06,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:06,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:06,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:06,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:06,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:06,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:06,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/174f7e4e938f46b28b779895580e2028 is 50, key is test_row_0/A:col10/1733656864944/Put/seqid=0 2024-12-08T11:21:06,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742136_1312 (size=12301) 2024-12-08T11:21:06,255 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/174f7e4e938f46b28b779895580e2028 2024-12-08T11:21:06,263 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2f9905de78044c3b8c79c659b10ddf0f is 50, key is test_row_0/B:col10/1733656864944/Put/seqid=0 2024-12-08T11:21:06,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742137_1313 (size=12301) 2024-12-08T11:21:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=79 2024-12-08T11:21:06,668 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2f9905de78044c3b8c79c659b10ddf0f 2024-12-08T11:21:06,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2b26c3081e724648b9de0c42f728c66f is 50, key is test_row_0/C:col10/1733656864944/Put/seqid=0 2024-12-08T11:21:06,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742138_1314 (size=12301) 2024-12-08T11:21:07,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:07,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:07,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656927075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656927075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656927076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656927077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656927078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,085 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2b26c3081e724648b9de0c42f728c66f 2024-12-08T11:21:07,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/174f7e4e938f46b28b779895580e2028 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/174f7e4e938f46b28b779895580e2028 2024-12-08T11:21:07,094 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/174f7e4e938f46b28b779895580e2028, entries=150, sequenceid=274, filesize=12.0 K 2024-12-08T11:21:07,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2f9905de78044c3b8c79c659b10ddf0f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2f9905de78044c3b8c79c659b10ddf0f 2024-12-08T11:21:07,099 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2f9905de78044c3b8c79c659b10ddf0f, entries=150, sequenceid=274, filesize=12.0 K 2024-12-08T11:21:07,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2b26c3081e724648b9de0c42f728c66f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2b26c3081e724648b9de0c42f728c66f 2024-12-08T11:21:07,108 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2b26c3081e724648b9de0c42f728c66f, entries=150, sequenceid=274, filesize=12.0 K 2024-12-08T11:21:07,109 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for eada905a64e5510a34d2159c0d8947a2 in 866ms, sequenceid=274, compaction requested=true 2024-12-08T11:21:07,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:07,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
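The flush that finally completes above adds one new HFile of roughly 12 K to each of the A, B and C stores (entries=150, sequenceid=274), and the flusher notes "compaction requested=true", so those stores are now candidates for a minor compaction. A compaction can also be requested explicitly through the Admin API; a minimal sketch with the same assumptions as before:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Queue a minor compaction of every store in the table...
      admin.compact(table);
      // ...or rewrite a single column family's files into one via a major compaction.
      admin.majorCompact(table, Bytes.toBytes("A"));
    }
  }
}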
2024-12-08T11:21:07,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-08T11:21:07,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-08T11:21:07,113 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-08T11:21:07,113 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9420 sec 2024-12-08T11:21:07,117 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.9480 sec 2024-12-08T11:21:07,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:07,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T11:21:07,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:07,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:07,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:07,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:07,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:07,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:07,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/6795e4160db945e6ad8c15fbbf86a5b7 is 50, key is test_row_0/A:col10/1733656867181/Put/seqid=0 2024-12-08T11:21:07,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742139_1315 (size=12301) 2024-12-08T11:21:07,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656927198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656927199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,205 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656927200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656927202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,208 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656927204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-08T11:21:07,272 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-08T11:21:07,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:07,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-08T11:21:07,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T11:21:07,278 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:07,280 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:07,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:07,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656927306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656927306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656927306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656927306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656927309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T11:21:07,431 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,431 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T11:21:07,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:07,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:07,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:07,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
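The burst of RegionTooBusyException warnings above is the region server rejecting puts while the region's memstore is over its blocking limit; the HBase client normally rides this out by retrying internally before surfacing a RetriesExhaustedException to the caller. As an assumed illustration only (the test's actual writer threads are not shown in this log, and the retry counts and cell values below are made-up examples), a writer tuned for that behaviour might look like:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriterSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry budget that decides how long a put waits out a blocked
        // region before failing; example values, not the test's actual settings.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);  // base pause between retries, in ms

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // RegionTooBusyException responses are retried transparently by the client;
            // the put only fails once the retry budget above is exhausted.
            table.put(put);
        }
    }
}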
2024-12-08T11:21:07,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:07,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:07,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656927510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,512 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656927511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656927511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656927511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656927514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T11:21:07,584 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,584 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T11:21:07,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:07,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:07,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:07,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
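The 512.0 K figure in "Over memstore limit=512.0 K" is the per-region blocking size that HRegion.checkResources enforces, derived as the memstore flush size multiplied by the block multiplier; a limit this small strongly suggests the test lowers the flush size on purpose to force frequent flushes. A hedged sketch of the two knobs involved, shown with their stock defaults rather than the test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // checkResources() blocks writes once a region's memstore exceeds
        // flushSize * blockMultiplier. With the stock defaults below that is 512 MB;
        // the 512 K seen in this log implies a much smaller flush size (for example
        // 128 K with the default multiplier of 4) set by the test harness.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes block above " + (flushSize * multiplier) + " bytes per region");
    }
}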
2024-12-08T11:21:07,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:07,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:07,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/6795e4160db945e6ad8c15fbbf86a5b7 2024-12-08T11:21:07,609 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/0a63d8f91f9543898efa97161d1a0acd is 50, key is test_row_0/B:col10/1733656867181/Put/seqid=0 2024-12-08T11:21:07,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742140_1316 (size=12301) 2024-12-08T11:21:07,614 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/0a63d8f91f9543898efa97161d1a0acd 2024-12-08T11:21:07,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/412e3d9dcb114965a2a52f5a45eeb4c1 is 50, key is test_row_0/C:col10/1733656867181/Put/seqid=0 2024-12-08T11:21:07,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742141_1317 (size=12301) 2024-12-08T11:21:07,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/412e3d9dcb114965a2a52f5a45eeb4c1 2024-12-08T11:21:07,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/6795e4160db945e6ad8c15fbbf86a5b7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6795e4160db945e6ad8c15fbbf86a5b7 2024-12-08T11:21:07,636 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6795e4160db945e6ad8c15fbbf86a5b7, entries=150, sequenceid=292, filesize=12.0 K 2024-12-08T11:21:07,636 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/0a63d8f91f9543898efa97161d1a0acd as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/0a63d8f91f9543898efa97161d1a0acd 2024-12-08T11:21:07,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/0a63d8f91f9543898efa97161d1a0acd, entries=150, sequenceid=292, filesize=12.0 K 2024-12-08T11:21:07,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/412e3d9dcb114965a2a52f5a45eeb4c1 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/412e3d9dcb114965a2a52f5a45eeb4c1 2024-12-08T11:21:07,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/412e3d9dcb114965a2a52f5a45eeb4c1, entries=150, sequenceid=292, filesize=12.0 K 2024-12-08T11:21:07,647 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for eada905a64e5510a34d2159c0d8947a2 in 465ms, sequenceid=292, compaction requested=true 2024-12-08T11:21:07,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:07,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:07,647 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:07,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:07,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:07,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:07,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:07,647 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:07,647 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:07,648 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 51840 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:07,648 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:21:07,649 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:07,649 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3738ac535aa242d5bd41c82cbef6dbd8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1c336c78407c4397b700c03714f5acf6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/174f7e4e938f46b28b779895580e2028, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6795e4160db945e6ad8c15fbbf86a5b7] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=50.6 K 2024-12-08T11:21:07,649 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:07,649 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:21:07,649 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
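The "Exploring compaction algorithm has selected 4 files" lines come from the store-file selection policy, whose core rule is that no file in the chosen set may be disproportionately larger than the rest. The sketch below is a simplified, assumed rendering of that ratio test, not the actual ExploringCompactionPolicy source; 1.2 is the documented default for hbase.hstore.compaction.ratio.

import java.util.List;

public class CompactionRatioSketch {
    // Simplified form of the "files in ratio" check: every candidate file must be
    // no larger than ratio * (combined size of the other files in the selection).
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Four store files roughly matching the 51840-byte selection logged above.
        System.out.println(filesInRatio(List.of(12_700L, 14_500L, 12_300L, 12_300L), 1.2));
    }
}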
2024-12-08T11:21:07,649 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f5f3e92d5e0645048fca950afa459d8a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/b7ceb633cdbc46caa950409484350d71, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2f9905de78044c3b8c79c659b10ddf0f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/0a63d8f91f9543898efa97161d1a0acd] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=48.3 K 2024-12-08T11:21:07,649 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3738ac535aa242d5bd41c82cbef6dbd8, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733656864192 2024-12-08T11:21:07,650 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f5f3e92d5e0645048fca950afa459d8a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733656864192 2024-12-08T11:21:07,650 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c336c78407c4397b700c03714f5acf6, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733656864819 2024-12-08T11:21:07,651 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b7ceb633cdbc46caa950409484350d71, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733656864819 2024-12-08T11:21:07,651 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 174f7e4e938f46b28b779895580e2028, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733656864939 2024-12-08T11:21:07,651 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f9905de78044c3b8c79c659b10ddf0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733656864939 2024-12-08T11:21:07,651 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6795e4160db945e6ad8c15fbbf86a5b7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733656867077 2024-12-08T11:21:07,652 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 0a63d8f91f9543898efa97161d1a0acd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733656867077 2024-12-08T11:21:07,664 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#270 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:07,665 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/f7ad68f2e7be4fe6839954453152affe is 50, key is test_row_0/B:col10/1733656867181/Put/seqid=0 2024-12-08T11:21:07,668 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#271 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:07,669 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/51949c2a53df4b32a620260e113da4d5 is 50, key is test_row_0/A:col10/1733656867181/Put/seqid=0 2024-12-08T11:21:07,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742142_1318 (size=12983) 2024-12-08T11:21:07,678 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/f7ad68f2e7be4fe6839954453152affe as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f7ad68f2e7be4fe6839954453152affe 2024-12-08T11:21:07,683 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into f7ad68f2e7be4fe6839954453152affe(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
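The "Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking" lines, and the completion message above, are governed by a small set of store-level thresholds. A hedged sketch with default values follows; the 16 in the log matches the stock blocking-store-files default, and the test may override the others.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholdSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // A store becomes eligible for minor compaction once it holds at least 'min'
        // files, never compacts more than 'max' at a time, and starts delaying new
        // flushes once it reaches 'blocking' files (16 by default, as in the log).
        int min = conf.getInt("hbase.hstore.compaction.min", 3);
        int max = conf.getInt("hbase.hstore.compaction.max", 10);
        int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.printf("minor compaction window: %d-%d files, blocking at %d%n", min, max, blocking);
    }
}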
2024-12-08T11:21:07,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742143_1319 (size=12983) 2024-12-08T11:21:07,683 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:07,683 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=12, startTime=1733656867647; duration=0sec 2024-12-08T11:21:07,684 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:07,684 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:21:07,684 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:07,686 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:07,686 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:21:07,686 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:07,686 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/6e05fb556cf14877b733186360c3e4b4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/7413f38c72f147ae9c985f729c41b7c3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2b26c3081e724648b9de0c42f728c66f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/412e3d9dcb114965a2a52f5a45eeb4c1] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=48.3 K 2024-12-08T11:21:07,689 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e05fb556cf14877b733186360c3e4b4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733656864192 2024-12-08T11:21:07,689 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7413f38c72f147ae9c985f729c41b7c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1733656864819 2024-12-08T11:21:07,690 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b26c3081e724648b9de0c42f728c66f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1733656864939 2024-12-08T11:21:07,690 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 412e3d9dcb114965a2a52f5a45eeb4c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733656867077 2024-12-08T11:21:07,696 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/51949c2a53df4b32a620260e113da4d5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/51949c2a53df4b32a620260e113da4d5 2024-12-08T11:21:07,705 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into 51949c2a53df4b32a620260e113da4d5(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:07,705 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:07,705 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=12, startTime=1733656867647; duration=0sec 2024-12-08T11:21:07,706 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:07,706 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:21:07,711 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#272 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:07,712 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/093cede8b6b543179836b824fd43d43a is 50, key is test_row_0/C:col10/1733656867181/Put/seqid=0 2024-12-08T11:21:07,737 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-08T11:21:07,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:07,738 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-08T11:21:07,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:07,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:07,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:07,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:07,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:07,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:07,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/6b2b712c3adb47d08ab75f28a600069f is 50, key is test_row_0/A:col10/1733656867192/Put/seqid=0 2024-12-08T11:21:07,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742144_1320 (size=12983) 2024-12-08T11:21:07,754 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/093cede8b6b543179836b824fd43d43a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/093cede8b6b543179836b824fd43d43a 2024-12-08T11:21:07,759 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into 093cede8b6b543179836b824fd43d43a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:07,759 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:07,759 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=12, startTime=1733656867647; duration=0sec 2024-12-08T11:21:07,760 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:07,760 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:21:07,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742145_1321 (size=12301) 2024-12-08T11:21:07,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:07,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:07,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656927825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656927826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656927826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656927827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656927828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T11:21:07,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656927929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656927929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656927930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656927930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:07,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656927932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656928133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656928133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656928133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656928134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656928137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,163 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/6b2b712c3adb47d08ab75f28a600069f 2024-12-08T11:21:08,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2ba16c5c4e904db5a15180142ef2f70e is 50, key is test_row_0/B:col10/1733656867192/Put/seqid=0 2024-12-08T11:21:08,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742146_1322 (size=12301) 2024-12-08T11:21:08,182 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2ba16c5c4e904db5a15180142ef2f70e 2024-12-08T11:21:08,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/b9a4213a31e644c0b74d2243a5b35e96 is 50, key is test_row_0/C:col10/1733656867192/Put/seqid=0 2024-12-08T11:21:08,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742147_1323 (size=12301) 2024-12-08T11:21:08,196 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/b9a4213a31e644c0b74d2243a5b35e96 2024-12-08T11:21:08,201 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/6b2b712c3adb47d08ab75f28a600069f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6b2b712c3adb47d08ab75f28a600069f 2024-12-08T11:21:08,206 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6b2b712c3adb47d08ab75f28a600069f, entries=150, sequenceid=312, filesize=12.0 K 2024-12-08T11:21:08,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2ba16c5c4e904db5a15180142ef2f70e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2ba16c5c4e904db5a15180142ef2f70e 2024-12-08T11:21:08,212 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2ba16c5c4e904db5a15180142ef2f70e, entries=150, sequenceid=312, filesize=12.0 K 2024-12-08T11:21:08,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/b9a4213a31e644c0b74d2243a5b35e96 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/b9a4213a31e644c0b74d2243a5b35e96 2024-12-08T11:21:08,218 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/b9a4213a31e644c0b74d2243a5b35e96, entries=150, sequenceid=312, filesize=12.0 K 2024-12-08T11:21:08,220 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for eada905a64e5510a34d2159c0d8947a2 in 482ms, sequenceid=312, compaction requested=false 2024-12-08T11:21:08,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:08,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:08,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-08T11:21:08,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-08T11:21:08,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-08T11:21:08,223 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 941 msec 2024-12-08T11:21:08,225 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 950 msec 2024-12-08T11:21:08,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-08T11:21:08,380 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-08T11:21:08,382 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:08,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-08T11:21:08,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-08T11:21:08,384 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:08,385 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:08,385 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:08,437 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T11:21:08,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:08,437 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:08,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:08,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:08,438 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:08,438 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:08,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/073e1a1256b6448eb1e14aa3cdb0b3df is 50, key is test_row_0/A:col10/1733656868436/Put/seqid=0 2024-12-08T11:21:08,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742148_1324 (size=12301) 2024-12-08T11:21:08,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656928455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656928456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656928456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656928457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656928458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-08T11:21:08,536 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-08T11:21:08,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:08,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:08,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:08,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
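Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects writes once the region's memstore passes the blocking limit, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The 512.0 K limit in this run is consistent with small, test-scale values (assumed below to be 128 K x 4; they were not read from this test's configuration). A minimal, hypothetical client-side sketch of backing off and retrying such a put follows — in practice the HBase client retries this internally, so the explicit loop is only to make the failure mode in the log concrete:

    // Hypothetical sketch only: backing off when a put hits the memstore blocking limit.
    // Table name, row and column are taken from the log above; the config values are
    // assumed test-scale settings, not values read from this run.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RegionTooBusyRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed test-scale flush size
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // 128 K * 4 = 512 K blocking limit
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 5; attempt++) {
            try {
              table.put(put);
              break;                     // write accepted
            } catch (RegionTooBusyException e) {
              Thread.sleep(backoffMs);   // memstore over the blocking limit; wait for the flush to catch up
              backoffMs *= 2;
            }
          }
        }
      }
    }

The backoff matters because the region only accepts writes again after the in-flight flush (visible further down in this log) frees memstore space.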
2024-12-08T11:21:08,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:08,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:08,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656928560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656928560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656928560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656928560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656928560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-08T11:21:08,694 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-08T11:21:08,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:08,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:08,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:08,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:08,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:08,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:08,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656928763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656928763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656928763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656928763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:08,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656928764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,847 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:08,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-08T11:21:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:08,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
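The pid=84 flush procedure above keeps failing with "NOT flushing ... as already flushing": the region server refuses to start a second flush while MemStoreFlusher.0 is still writing, and the master re-dispatches the FlushRegionCallable until it succeeds. For reference, a minimal sketch of the Admin call that drives such a table flush (standard HBase client API; the retry loop seen in the log lives in the master-side procedure, not in the caller):

    // Minimal sketch: asking the master to flush a table. Any "already flushing"
    // response from a region server is handled by the master's procedure retries.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees")); // requests a flush of every region of the table
        }
      }
    }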
2024-12-08T11:21:08,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:08,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:08,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/073e1a1256b6448eb1e14aa3cdb0b3df 2024-12-08T11:21:08,866 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/8a8da24d8eac410e867b1838b45eaa2e is 50, key is test_row_0/B:col10/1733656868436/Put/seqid=0 2024-12-08T11:21:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742149_1325 (size=12301) 2024-12-08T11:21:08,882 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/8a8da24d8eac410e867b1838b45eaa2e 2024-12-08T11:21:08,889 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/0d85b2d2d7824095bfeb2971bd09e082 is 50, key is test_row_0/C:col10/1733656868436/Put/seqid=0 2024-12-08T11:21:08,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742150_1326 (size=12301) 2024-12-08T11:21:08,894 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/0d85b2d2d7824095bfeb2971bd09e082 2024-12-08T11:21:08,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/073e1a1256b6448eb1e14aa3cdb0b3df as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/073e1a1256b6448eb1e14aa3cdb0b3df 2024-12-08T11:21:08,917 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/073e1a1256b6448eb1e14aa3cdb0b3df, entries=150, sequenceid=332, filesize=12.0 K 2024-12-08T11:21:08,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/8a8da24d8eac410e867b1838b45eaa2e as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/8a8da24d8eac410e867b1838b45eaa2e 2024-12-08T11:21:08,925 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/8a8da24d8eac410e867b1838b45eaa2e, entries=150, sequenceid=332, filesize=12.0 K 2024-12-08T11:21:08,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/0d85b2d2d7824095bfeb2971bd09e082 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/0d85b2d2d7824095bfeb2971bd09e082 2024-12-08T11:21:08,935 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/0d85b2d2d7824095bfeb2971bd09e082, entries=150, sequenceid=332, filesize=12.0 K 2024-12-08T11:21:08,936 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for eada905a64e5510a34d2159c0d8947a2 in 499ms, sequenceid=332, compaction requested=true 2024-12-08T11:21:08,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:08,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:08,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:08,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:08,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:08,936 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:08,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:08,936 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:08,936 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:08,937 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:08,937 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:21:08,937 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:08,938 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/51949c2a53df4b32a620260e113da4d5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6b2b712c3adb47d08ab75f28a600069f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/073e1a1256b6448eb1e14aa3cdb0b3df] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=36.7 K 2024-12-08T11:21:08,939 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:08,939 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51949c2a53df4b32a620260e113da4d5, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733656867077 2024-12-08T11:21:08,939 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:21:08,939 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
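The selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", followed by the ExploringCompactionPolicy pick of 3 files totalling 37585 bytes) line up with the standard minor-compaction knobs. A sketch of those settings with their usual defaults — assumed values, not read from this test's hbase-site.xml:

    // Assumed defaults behind the compaction-selection messages above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionDefaults {
      public static Configuration sketch() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // at least 3 eligible files before a minor compaction ("3 eligible")
        conf.setInt("hbase.hstore.compaction.max", 10);       // upper bound on files per compaction
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // "16 blocking": writes stall above this store-file count
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by ExploringCompactionPolicy
        return conf;
      }
    }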
2024-12-08T11:21:08,939 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f7ad68f2e7be4fe6839954453152affe, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2ba16c5c4e904db5a15180142ef2f70e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/8a8da24d8eac410e867b1838b45eaa2e] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=36.7 K 2024-12-08T11:21:08,939 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b2b712c3adb47d08ab75f28a600069f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733656867192 2024-12-08T11:21:08,939 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f7ad68f2e7be4fe6839954453152affe, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733656867077 2024-12-08T11:21:08,939 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 073e1a1256b6448eb1e14aa3cdb0b3df, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733656867826 2024-12-08T11:21:08,940 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ba16c5c4e904db5a15180142ef2f70e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733656867192 2024-12-08T11:21:08,940 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 8a8da24d8eac410e867b1838b45eaa2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733656867826 2024-12-08T11:21:08,955 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:08,955 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/3ff337b83ff1410b89b017480440a416 is 50, key is test_row_0/A:col10/1733656868436/Put/seqid=0 2024-12-08T11:21:08,970 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#280 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:08,973 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/a71df23039a44d2399111f2c191a1dc1 is 50, key is test_row_0/B:col10/1733656868436/Put/seqid=0 2024-12-08T11:21:08,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-08T11:21:09,000 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-08T11:21:09,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:09,001 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T11:21:09,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:09,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:09,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:09,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:09,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:09,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:09,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742151_1327 (size=13085) 2024-12-08T11:21:09,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742152_1328 (size=13085) 2024-12-08T11:21:09,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/0213f823f4474070b6ecb7c76f1e3f10 is 50, key is test_row_0/A:col10/1733656868455/Put/seqid=0 
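The throughput lines above ("average throughput is 6.55 MB/second ... total limit is 50.00 MB/second") come from the pressure-aware compaction throughput controller: with no flush pressure it throttles compactions at its lower bound, which defaults to 50 MB/s. A hedged sketch of the related settings — the key names are the ones used by the pressure-aware controller, and the values shown are assumed defaults rather than this run's actual configuration:

    // Assumed defaults for the PressureAwareThroughputController lines above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputDefaults {
      public static Configuration sketch() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);   // 50 MB/s floor ("total limit is 50.00 MB/second")
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // ceiling applied under memstore pressure
        return conf;
      }
    }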
2024-12-08T11:21:09,050 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/a71df23039a44d2399111f2c191a1dc1 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/a71df23039a44d2399111f2c191a1dc1 2024-12-08T11:21:09,057 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into a71df23039a44d2399111f2c191a1dc1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:09,057 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:09,057 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=13, startTime=1733656868936; duration=0sec 2024-12-08T11:21:09,057 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:09,057 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:21:09,057 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:09,063 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:09,063 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:21:09,063 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:09,063 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/093cede8b6b543179836b824fd43d43a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/b9a4213a31e644c0b74d2243a5b35e96, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/0d85b2d2d7824095bfeb2971bd09e082] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=36.7 K 2024-12-08T11:21:09,064 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 093cede8b6b543179836b824fd43d43a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733656867077 2024-12-08T11:21:09,064 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b9a4213a31e644c0b74d2243a5b35e96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1733656867192 2024-12-08T11:21:09,065 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d85b2d2d7824095bfeb2971bd09e082, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733656867826 2024-12-08T11:21:09,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:09,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:09,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656929084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656929084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656929085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656929086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656929087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742153_1329 (size=12301) 2024-12-08T11:21:09,100 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/0213f823f4474070b6ecb7c76f1e3f10 2024-12-08T11:21:09,106 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:09,107 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/bd11c6db1b394719bc8c719d03084587 is 50, key is test_row_0/C:col10/1733656868436/Put/seqid=0 2024-12-08T11:21:09,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/50f1c1ba973f429d80f827fd830dbffc is 50, key is test_row_0/B:col10/1733656868455/Put/seqid=0 2024-12-08T11:21:09,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742154_1330 (size=13085) 2024-12-08T11:21:09,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742155_1331 (size=12301) 2024-12-08T11:21:09,171 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/50f1c1ba973f429d80f827fd830dbffc 2024-12-08T11:21:09,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656929189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656929189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656929192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656929192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656929192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/3cbb89e1ba7242b9855372e1f098377b is 50, key is test_row_0/C:col10/1733656868455/Put/seqid=0 2024-12-08T11:21:09,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742156_1332 (size=12301) 2024-12-08T11:21:09,244 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/3cbb89e1ba7242b9855372e1f098377b 2024-12-08T11:21:09,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/0213f823f4474070b6ecb7c76f1e3f10 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0213f823f4474070b6ecb7c76f1e3f10 2024-12-08T11:21:09,255 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0213f823f4474070b6ecb7c76f1e3f10, entries=150, sequenceid=349, filesize=12.0 K 2024-12-08T11:21:09,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/50f1c1ba973f429d80f827fd830dbffc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/50f1c1ba973f429d80f827fd830dbffc 2024-12-08T11:21:09,264 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/50f1c1ba973f429d80f827fd830dbffc, entries=150, sequenceid=349, filesize=12.0 K 2024-12-08T11:21:09,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/3cbb89e1ba7242b9855372e1f098377b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/3cbb89e1ba7242b9855372e1f098377b 2024-12-08T11:21:09,271 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/3cbb89e1ba7242b9855372e1f098377b, entries=150, sequenceid=349, filesize=12.0 K 2024-12-08T11:21:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-08T11:21:09,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-08T11:21:09,276 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for eada905a64e5510a34d2159c0d8947a2 in 275ms, sequenceid=349, compaction requested=false 2024-12-08T11:21:09,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:09,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:09,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-08T11:21:09,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-08T11:21:09,279 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-08T11:21:09,279 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 892 msec 2024-12-08T11:21:09,281 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 897 msec 2024-12-08T11:21:09,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:09,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:21:09,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:09,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:09,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:09,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:09,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:09,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:09,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/ecf43a40a1744904951e945baf7a1a4f is 50, key is test_row_0/A:col10/1733656869084/Put/seqid=0 2024-12-08T11:21:09,428 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656929424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656929424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656929425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656929427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,437 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/3ff337b83ff1410b89b017480440a416 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3ff337b83ff1410b89b017480440a416 2024-12-08T11:21:09,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656929432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,442 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into 3ff337b83ff1410b89b017480440a416(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:09,442 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:09,442 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=13, startTime=1733656868936; duration=0sec 2024-12-08T11:21:09,443 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:09,443 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:21:09,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742157_1333 (size=12301) 2024-12-08T11:21:09,458 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/ecf43a40a1744904951e945baf7a1a4f 2024-12-08T11:21:09,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/444c2d7e6d184257a2a85e9076887b8a is 50, key is test_row_0/B:col10/1733656869084/Put/seqid=0 2024-12-08T11:21:09,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-08T11:21:09,488 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-08T11:21:09,489 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:09,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-08T11:21:09,491 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:09,491 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:09,491 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:09,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T11:21:09,508 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742158_1334 (size=12301) 2024-12-08T11:21:09,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656929530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656929530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656929531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656929531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656929538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,552 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/bd11c6db1b394719bc8c719d03084587 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bd11c6db1b394719bc8c719d03084587 2024-12-08T11:21:09,558 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into bd11c6db1b394719bc8c719d03084587(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:09,558 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:09,558 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=13, startTime=1733656868936; duration=0sec 2024-12-08T11:21:09,558 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:09,558 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:21:09,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T11:21:09,643 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T11:21:09,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:09,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:09,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:09,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:09,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:09,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656929733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656929733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656929733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656929733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:09,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656929741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T11:21:09,796 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T11:21:09,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:09,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:09,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:09,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:09,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:09,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:09,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/444c2d7e6d184257a2a85e9076887b8a 2024-12-08T11:21:09,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/077af1a19c014929b92dffbfd88f4041 is 50, key is test_row_0/C:col10/1733656869084/Put/seqid=0 2024-12-08T11:21:09,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742159_1335 (size=12301) 2024-12-08T11:21:09,938 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/077af1a19c014929b92dffbfd88f4041 2024-12-08T11:21:09,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/ecf43a40a1744904951e945baf7a1a4f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/ecf43a40a1744904951e945baf7a1a4f 2024-12-08T11:21:09,947 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/ecf43a40a1744904951e945baf7a1a4f, entries=150, sequenceid=372, filesize=12.0 K 2024-12-08T11:21:09,948 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/444c2d7e6d184257a2a85e9076887b8a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/444c2d7e6d184257a2a85e9076887b8a 2024-12-08T11:21:09,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:09,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T11:21:09,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:09,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:09,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:09,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:09,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:09,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:09,953 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/444c2d7e6d184257a2a85e9076887b8a, entries=150, sequenceid=372, filesize=12.0 K 2024-12-08T11:21:09,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/077af1a19c014929b92dffbfd88f4041 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/077af1a19c014929b92dffbfd88f4041 2024-12-08T11:21:09,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/077af1a19c014929b92dffbfd88f4041, entries=150, sequenceid=372, filesize=12.0 K 2024-12-08T11:21:09,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for eada905a64e5510a34d2159c0d8947a2 in 561ms, sequenceid=372, compaction requested=true 2024-12-08T11:21:09,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:09,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:09,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:09,958 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:09,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:09,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:09,958 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:09,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:09,958 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:09,960 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:09,960 DEBUG 
[RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:21:09,960 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:09,960 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/a71df23039a44d2399111f2c191a1dc1, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/50f1c1ba973f429d80f827fd830dbffc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/444c2d7e6d184257a2a85e9076887b8a] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=36.8 K 2024-12-08T11:21:09,960 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a71df23039a44d2399111f2c191a1dc1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733656867826 2024-12-08T11:21:09,961 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:09,961 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:21:09,961 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 50f1c1ba973f429d80f827fd830dbffc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1733656868455 2024-12-08T11:21:09,961 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:09,961 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3ff337b83ff1410b89b017480440a416, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0213f823f4474070b6ecb7c76f1e3f10, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/ecf43a40a1744904951e945baf7a1a4f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=36.8 K 2024-12-08T11:21:09,962 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3ff337b83ff1410b89b017480440a416, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733656867826 2024-12-08T11:21:09,962 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 444c2d7e6d184257a2a85e9076887b8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733656869084 2024-12-08T11:21:09,966 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0213f823f4474070b6ecb7c76f1e3f10, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1733656868455 2024-12-08T11:21:09,967 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting ecf43a40a1744904951e945baf7a1a4f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733656869084 2024-12-08T11:21:09,994 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#288 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:09,995 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/94cac8cf08f1450aac2409204c868790 is 50, key is test_row_0/B:col10/1733656869084/Put/seqid=0 2024-12-08T11:21:10,002 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#289 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:10,003 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/aef13fc4a6074ee1a20f7edb5b1ffaa8 is 50, key is test_row_0/A:col10/1733656869084/Put/seqid=0 2024-12-08T11:21:10,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742160_1336 (size=13187) 2024-12-08T11:21:10,031 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/94cac8cf08f1450aac2409204c868790 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/94cac8cf08f1450aac2409204c868790 2024-12-08T11:21:10,038 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into 94cac8cf08f1450aac2409204c868790(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:10,038 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:10,038 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=13, startTime=1733656869958; duration=0sec 2024-12-08T11:21:10,038 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:10,038 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:21:10,039 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:10,041 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:10,041 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:21:10,041 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:10,041 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bd11c6db1b394719bc8c719d03084587, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/3cbb89e1ba7242b9855372e1f098377b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/077af1a19c014929b92dffbfd88f4041] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=36.8 K 2024-12-08T11:21:10,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:10,042 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T11:21:10,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:10,042 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting bd11c6db1b394719bc8c719d03084587, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1733656867826 2024-12-08T11:21:10,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:10,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:10,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:10,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:10,042 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:10,043 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 3cbb89e1ba7242b9855372e1f098377b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=349, earliestPutTs=1733656868455 2024-12-08T11:21:10,044 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 077af1a19c014929b92dffbfd88f4041, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733656869084 2024-12-08T11:21:10,054 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/cb336533bbd448e9af23c7d742823c25 is 50, key is test_row_0/A:col10/1733656869409/Put/seqid=0 2024-12-08T11:21:10,062 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#291 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:10,062 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/95b238f4838a4f39ac3980908f6ec412 is 50, key is test_row_0/C:col10/1733656869084/Put/seqid=0 2024-12-08T11:21:10,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742161_1337 (size=13187) 2024-12-08T11:21:10,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656930061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656930064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656930069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,073 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656930069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656930069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,078 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/aef13fc4a6074ee1a20f7edb5b1ffaa8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/aef13fc4a6074ee1a20f7edb5b1ffaa8 2024-12-08T11:21:10,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742162_1338 (size=14741) 2024-12-08T11:21:10,090 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into aef13fc4a6074ee1a20f7edb5b1ffaa8(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:10,090 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:10,091 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=13, startTime=1733656869958; duration=0sec 2024-12-08T11:21:10,091 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:10,091 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:21:10,091 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/cb336533bbd448e9af23c7d742823c25 2024-12-08T11:21:10,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T11:21:10,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/55ffddfde2c54964bf40288dab4d9f88 is 50, key is test_row_0/B:col10/1733656869409/Put/seqid=0 2024-12-08T11:21:10,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T11:21:10,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:10,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742163_1339 (size=13187) 2024-12-08T11:21:10,123 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/95b238f4838a4f39ac3980908f6ec412 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/95b238f4838a4f39ac3980908f6ec412 2024-12-08T11:21:10,131 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into 95b238f4838a4f39ac3980908f6ec412(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:10,131 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:10,131 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=13, startTime=1733656869958; duration=0sec 2024-12-08T11:21:10,131 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:10,131 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:21:10,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742164_1340 (size=12301) 2024-12-08T11:21:10,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656930171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656930171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656930174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656930176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656930176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,256 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T11:21:10,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:10,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,256 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:10,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656930374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656930375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656930376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,380 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656930379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656930379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,408 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T11:21:10,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:10,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:10,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,542 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/55ffddfde2c54964bf40288dab4d9f88 2024-12-08T11:21:10,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/374fa245d93a4714bae09ba77b9ff98a is 50, key is test_row_0/C:col10/1733656869409/Put/seqid=0 2024-12-08T11:21:10,570 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T11:21:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742165_1341 (size=12301) 2024-12-08T11:21:10,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T11:21:10,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656930677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656930679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656930681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656930681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:10,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656930683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,723 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T11:21:10,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:10,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,724 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:10,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,876 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:10,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-08T11:21:10,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:10,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:10,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:10,980 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=391 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/374fa245d93a4714bae09ba77b9ff98a 2024-12-08T11:21:10,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/cb336533bbd448e9af23c7d742823c25 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/cb336533bbd448e9af23c7d742823c25 2024-12-08T11:21:10,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/cb336533bbd448e9af23c7d742823c25, entries=200, sequenceid=391, filesize=14.4 K 2024-12-08T11:21:10,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/55ffddfde2c54964bf40288dab4d9f88 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/55ffddfde2c54964bf40288dab4d9f88 2024-12-08T11:21:11,000 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/55ffddfde2c54964bf40288dab4d9f88, entries=150, sequenceid=391, filesize=12.0 K 2024-12-08T11:21:11,001 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/374fa245d93a4714bae09ba77b9ff98a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/374fa245d93a4714bae09ba77b9ff98a 2024-12-08T11:21:11,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/374fa245d93a4714bae09ba77b9ff98a, entries=150, sequenceid=391, filesize=12.0 K 2024-12-08T11:21:11,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for eada905a64e5510a34d2159c0d8947a2 in 962ms, sequenceid=391, compaction requested=false 2024-12-08T11:21:11,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:11,030 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=86 2024-12-08T11:21:11,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:11,031 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-08T11:21:11,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:11,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:11,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:11,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:11,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:11,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:11,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/a8f39d3ee9814ddd9b203145ef34878c is 50, key is test_row_0/A:col10/1733656870066/Put/seqid=0 2024-12-08T11:21:11,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742166_1342 (size=12301) 2024-12-08T11:21:11,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:11,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:11,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656931191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656931191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656931192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656931193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656931194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656931295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656931295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656931296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656931296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656931297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,441 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/a8f39d3ee9814ddd9b203145ef34878c 2024-12-08T11:21:11,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/35cccdcd9afd45deb23bbbb661793ac1 is 50, key is test_row_0/B:col10/1733656870066/Put/seqid=0 2024-12-08T11:21:11,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742167_1343 (size=12301) 2024-12-08T11:21:11,499 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656931498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656931499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656931499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656931499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656931500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T11:21:11,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656931802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656931803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,805 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656931803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656931804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:11,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656931806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:11,854 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/35cccdcd9afd45deb23bbbb661793ac1 2024-12-08T11:21:11,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2ebf2ab2f7f14fcbae6b6d4ce632eb2b is 50, key is test_row_0/C:col10/1733656870066/Put/seqid=0 2024-12-08T11:21:11,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742168_1344 (size=12301) 2024-12-08T11:21:11,866 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=414 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2ebf2ab2f7f14fcbae6b6d4ce632eb2b 2024-12-08T11:21:11,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/a8f39d3ee9814ddd9b203145ef34878c as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a8f39d3ee9814ddd9b203145ef34878c 2024-12-08T11:21:11,875 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a8f39d3ee9814ddd9b203145ef34878c, entries=150, sequenceid=414, filesize=12.0 K 2024-12-08T11:21:11,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/35cccdcd9afd45deb23bbbb661793ac1 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/35cccdcd9afd45deb23bbbb661793ac1 2024-12-08T11:21:11,880 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/35cccdcd9afd45deb23bbbb661793ac1, entries=150, sequenceid=414, filesize=12.0 K 2024-12-08T11:21:11,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2ebf2ab2f7f14fcbae6b6d4ce632eb2b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ebf2ab2f7f14fcbae6b6d4ce632eb2b 2024-12-08T11:21:11,885 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ebf2ab2f7f14fcbae6b6d4ce632eb2b, entries=150, sequenceid=414, filesize=12.0 K 2024-12-08T11:21:11,886 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for eada905a64e5510a34d2159c0d8947a2 in 855ms, sequenceid=414, compaction requested=true 2024-12-08T11:21:11,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:11,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:11,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-08T11:21:11,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-08T11:21:11,888 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-08T11:21:11,889 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3960 sec 2024-12-08T11:21:11,890 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.4000 sec 2024-12-08T11:21:12,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:12,306 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T11:21:12,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:12,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:12,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:12,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:12,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:12,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:12,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/87a6e5a268324e789f65375d628339d9 is 50, key is test_row_0/A:col10/1733656872305/Put/seqid=0 2024-12-08T11:21:12,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742169_1345 (size=12301) 2024-12-08T11:21:12,327 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656932323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656932325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656932326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656932327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656932327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,429 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656932428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,433 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656932431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656932432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656932432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656932433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656932633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656932634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656932635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656932635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656932636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/87a6e5a268324e789f65375d628339d9 2024-12-08T11:21:12,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/9a928359922647a291eb57b334720abd is 50, key is test_row_0/B:col10/1733656872305/Put/seqid=0 2024-12-08T11:21:12,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742170_1346 (size=12301) 2024-12-08T11:21:12,754 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/9a928359922647a291eb57b334720abd 2024-12-08T11:21:12,770 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/9feedd7451c9492183dbbd47ad2e72bc is 50, key is test_row_0/C:col10/1733656872305/Put/seqid=0 2024-12-08T11:21:12,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742171_1347 (size=12301) 2024-12-08T11:21:12,936 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656932935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656932938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656932938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656932938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:12,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:12,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656932939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,178 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/9feedd7451c9492183dbbd47ad2e72bc 2024-12-08T11:21:13,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/87a6e5a268324e789f65375d628339d9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/87a6e5a268324e789f65375d628339d9 2024-12-08T11:21:13,187 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/87a6e5a268324e789f65375d628339d9, entries=150, sequenceid=430, filesize=12.0 K 2024-12-08T11:21:13,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/9a928359922647a291eb57b334720abd as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a928359922647a291eb57b334720abd 2024-12-08T11:21:13,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a928359922647a291eb57b334720abd, entries=150, sequenceid=430, filesize=12.0 K 2024-12-08T11:21:13,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/9feedd7451c9492183dbbd47ad2e72bc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/9feedd7451c9492183dbbd47ad2e72bc 2024-12-08T11:21:13,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/9feedd7451c9492183dbbd47ad2e72bc, entries=150, sequenceid=430, filesize=12.0 K 2024-12-08T11:21:13,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for eada905a64e5510a34d2159c0d8947a2 in 897ms, sequenceid=430, compaction requested=true 2024-12-08T11:21:13,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:13,204 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:13,204 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:13,205 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:13,205 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:13,205 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52530 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:13,206 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:21:13,206 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:13,206 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/aef13fc4a6074ee1a20f7edb5b1ffaa8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/cb336533bbd448e9af23c7d742823c25, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a8f39d3ee9814ddd9b203145ef34878c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/87a6e5a268324e789f65375d628339d9] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=51.3 K 2024-12-08T11:21:13,206 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:13,206 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting aef13fc4a6074ee1a20f7edb5b1ffaa8, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733656869084 2024-12-08T11:21:13,206 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:13,207 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:21:13,207 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:13,207 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/94cac8cf08f1450aac2409204c868790, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/55ffddfde2c54964bf40288dab4d9f88, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/35cccdcd9afd45deb23bbbb661793ac1, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a928359922647a291eb57b334720abd] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=48.9 K 2024-12-08T11:21:13,207 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 94cac8cf08f1450aac2409204c868790, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733656869084 2024-12-08T11:21:13,207 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb336533bbd448e9af23c7d742823c25, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733656869409 2024-12-08T11:21:13,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:13,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:13,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:13,208 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 55ffddfde2c54964bf40288dab4d9f88, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733656869409 2024-12-08T11:21:13,208 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 35cccdcd9afd45deb23bbbb661793ac1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733656870058 2024-12-08T11:21:13,208 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting a8f39d3ee9814ddd9b203145ef34878c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733656870058 2024-12-08T11:21:13,208 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a928359922647a291eb57b334720abd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733656871187 2024-12-08T11:21:13,209 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87a6e5a268324e789f65375d628339d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733656871187 
2024-12-08T11:21:13,231 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#300 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:13,231 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/3f59cc457f674d12ae8de1534ed5d910 is 50, key is test_row_0/A:col10/1733656872305/Put/seqid=0 2024-12-08T11:21:13,239 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#301 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:13,239 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2e7ebcde551149b8bfe47873796f46ac is 50, key is test_row_0/B:col10/1733656872305/Put/seqid=0 2024-12-08T11:21:13,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742172_1348 (size=13323) 2024-12-08T11:21:13,282 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/3f59cc457f674d12ae8de1534ed5d910 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3f59cc457f674d12ae8de1534ed5d910 2024-12-08T11:21:13,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742173_1349 (size=13323) 2024-12-08T11:21:13,289 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into 3f59cc457f674d12ae8de1534ed5d910(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:13,289 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:13,289 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=12, startTime=1733656873204; duration=0sec 2024-12-08T11:21:13,289 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:13,289 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:21:13,289 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:13,290 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:13,290 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:21:13,290 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:13,290 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/95b238f4838a4f39ac3980908f6ec412, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/374fa245d93a4714bae09ba77b9ff98a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ebf2ab2f7f14fcbae6b6d4ce632eb2b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/9feedd7451c9492183dbbd47ad2e72bc] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=48.9 K 2024-12-08T11:21:13,291 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95b238f4838a4f39ac3980908f6ec412, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1733656869084 2024-12-08T11:21:13,291 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 374fa245d93a4714bae09ba77b9ff98a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=391, earliestPutTs=1733656869409 2024-12-08T11:21:13,291 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ebf2ab2f7f14fcbae6b6d4ce632eb2b, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=414, earliestPutTs=1733656870058 2024-12-08T11:21:13,292 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9feedd7451c9492183dbbd47ad2e72bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733656871187 2024-12-08T11:21:13,301 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#302 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:13,301 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/4fa0c0a56abc403f8748a791cfd6668b is 50, key is test_row_0/C:col10/1733656872305/Put/seqid=0 2024-12-08T11:21:13,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742174_1350 (size=13323) 2024-12-08T11:21:13,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:13,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T11:21:13,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:13,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:13,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:13,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:13,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:13,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:13,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/49b563c8852a4a8ab5869c1b8ba67640 is 50, key is test_row_0/A:col10/1733656872326/Put/seqid=0 2024-12-08T11:21:13,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742175_1351 (size=14741) 2024-12-08T11:21:13,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656933454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656933454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656933454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656933455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,458 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656933455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656933559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656933559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656933559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656933559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656933559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-08T11:21:13,600 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-08T11:21:13,602 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:13,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-12-08T11:21:13,603 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:13,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T11:21:13,604 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:13,604 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:13,688 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/2e7ebcde551149b8bfe47873796f46ac as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2e7ebcde551149b8bfe47873796f46ac 2024-12-08T11:21:13,693 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into 2e7ebcde551149b8bfe47873796f46ac(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:13,693 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:13,693 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=12, startTime=1733656873205; duration=0sec 2024-12-08T11:21:13,693 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:13,693 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:21:13,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T11:21:13,710 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/4fa0c0a56abc403f8748a791cfd6668b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/4fa0c0a56abc403f8748a791cfd6668b 2024-12-08T11:21:13,717 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into 4fa0c0a56abc403f8748a791cfd6668b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:13,717 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:13,717 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=12, startTime=1733656873207; duration=0sec 2024-12-08T11:21:13,717 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:13,717 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:21:13,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,756 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T11:21:13,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:13,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:13,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:13,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:13,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:13,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:13,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656933761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656933769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656933770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656933771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:13,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656933771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/49b563c8852a4a8ab5869c1b8ba67640 2024-12-08T11:21:13,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/025c4692696e49a399ba7cc2d78d77b3 is 50, key is test_row_0/B:col10/1733656872326/Put/seqid=0 2024-12-08T11:21:13,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742176_1352 (size=12301) 2024-12-08T11:21:13,865 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/025c4692696e49a399ba7cc2d78d77b3 2024-12-08T11:21:13,872 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2bde2118ad0a42f2936468664602a11a is 50, key is test_row_0/C:col10/1733656872326/Put/seqid=0 2024-12-08T11:21:13,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742177_1353 (size=12301) 2024-12-08T11:21:13,904 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T11:21:13,909 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:13,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T11:21:13,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:13,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:13,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:13,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:13,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:13,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:14,062 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T11:21:14,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:14,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:14,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:14,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:14,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:14,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:14,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656934065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656934071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656934071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656934072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656934073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T11:21:14,215 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T11:21:14,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:14,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:14,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:14,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:14,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:14,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:14,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=453 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2bde2118ad0a42f2936468664602a11a 2024-12-08T11:21:14,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/49b563c8852a4a8ab5869c1b8ba67640 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/49b563c8852a4a8ab5869c1b8ba67640 2024-12-08T11:21:14,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/49b563c8852a4a8ab5869c1b8ba67640, entries=200, sequenceid=453, filesize=14.4 K 2024-12-08T11:21:14,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/025c4692696e49a399ba7cc2d78d77b3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/025c4692696e49a399ba7cc2d78d77b3 2024-12-08T11:21:14,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/025c4692696e49a399ba7cc2d78d77b3, entries=150, 
sequenceid=453, filesize=12.0 K 2024-12-08T11:21:14,295 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/2bde2118ad0a42f2936468664602a11a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2bde2118ad0a42f2936468664602a11a 2024-12-08T11:21:14,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2bde2118ad0a42f2936468664602a11a, entries=150, sequenceid=453, filesize=12.0 K 2024-12-08T11:21:14,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for eada905a64e5510a34d2159c0d8947a2 in 857ms, sequenceid=453, compaction requested=false 2024-12-08T11:21:14,301 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:14,367 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-12-08T11:21:14,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:14,368 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T11:21:14,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:14,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:14,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:14,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:14,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/88c96ab8b8f54a5ab39799232e506c68 is 50, key is test_row_0/A:col10/1733656873453/Put/seqid=0 2024-12-08T11:21:14,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742178_1354 (size=12301) 2024-12-08T11:21:14,393 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/88c96ab8b8f54a5ab39799232e506c68 2024-12-08T11:21:14,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/bb61284ebd1247e483e8ca1df94c10f8 is 50, key is test_row_0/B:col10/1733656873453/Put/seqid=0 2024-12-08T11:21:14,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742179_1355 (size=12301) 2024-12-08T11:21:14,420 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=469 (bloomFilter=true), 
to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/bb61284ebd1247e483e8ca1df94c10f8 2024-12-08T11:21:14,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/64756b1b333249068d25ee75a31a9e54 is 50, key is test_row_0/C:col10/1733656873453/Put/seqid=0 2024-12-08T11:21:14,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742180_1356 (size=12301) 2024-12-08T11:21:14,459 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=469 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/64756b1b333249068d25ee75a31a9e54 2024-12-08T11:21:14,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/88c96ab8b8f54a5ab39799232e506c68 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/88c96ab8b8f54a5ab39799232e506c68 2024-12-08T11:21:14,469 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/88c96ab8b8f54a5ab39799232e506c68, entries=150, sequenceid=469, filesize=12.0 K 2024-12-08T11:21:14,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/bb61284ebd1247e483e8ca1df94c10f8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/bb61284ebd1247e483e8ca1df94c10f8 2024-12-08T11:21:14,476 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/bb61284ebd1247e483e8ca1df94c10f8, entries=150, sequenceid=469, filesize=12.0 K 2024-12-08T11:21:14,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/64756b1b333249068d25ee75a31a9e54 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/64756b1b333249068d25ee75a31a9e54 2024-12-08T11:21:14,481 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/64756b1b333249068d25ee75a31a9e54, entries=150, sequenceid=469, filesize=12.0 K 2024-12-08T11:21:14,481 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for eada905a64e5510a34d2159c0d8947a2 in 113ms, sequenceid=469, compaction requested=true 2024-12-08T11:21:14,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:14,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:14,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-08T11:21:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-08T11:21:14,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-08T11:21:14,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 879 msec 2024-12-08T11:21:14,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 883 msec 2024-12-08T11:21:14,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:14,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:21:14,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:14,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:14,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:14,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:14,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:14,600 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:14,608 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/7c4f765f2ff4411185dbdead6c030749 is 50, key is test_row_0/A:col10/1733656874598/Put/seqid=0 2024-12-08T11:21:14,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742181_1357 (size=12301) 2024-12-08T11:21:14,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656934621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656934622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656934623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656934624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656934624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-08T11:21:14,706 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-08T11:21:14,708 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:14,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-12-08T11:21:14,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T11:21:14,710 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:14,711 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:14,711 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:14,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656934726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656934728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656934728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656934729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656934729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T11:21:14,862 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-08T11:21:14,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:14,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:14,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:14,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:14,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:14,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:14,930 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656934928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656934932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656934932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656934932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:14,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:14,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656934941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,016 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/7c4f765f2ff4411185dbdead6c030749 2024-12-08T11:21:15,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T11:21:15,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-08T11:21:15,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:15,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:15,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:15,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/7484025662794de49172130bff05d34d is 50, key is test_row_0/B:col10/1733656874598/Put/seqid=0 2024-12-08T11:21:15,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742182_1358 (size=12301) 2024-12-08T11:21:15,170 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-08T11:21:15,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:15,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:15,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:15,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:15,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34822 deadline: 1733656935233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34806 deadline: 1733656935234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34808 deadline: 1733656935236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,238 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:15,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34812 deadline: 1733656935236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:15,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:34768 deadline: 1733656935246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T11:21:15,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-08T11:21:15,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:15,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:15,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:15,324 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/7484025662794de49172130bff05d34d 2024-12-08T11:21:15,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/5a0ffba9e952415ea35a77346c05e9e3 is 50, key is test_row_0/C:col10/1733656874598/Put/seqid=0 2024-12-08T11:21:15,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742183_1359 (size=12301) 2024-12-08T11:21:15,449 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=483 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/5a0ffba9e952415ea35a77346c05e9e3 2024-12-08T11:21:15,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/7c4f765f2ff4411185dbdead6c030749 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7c4f765f2ff4411185dbdead6c030749 2024-12-08T11:21:15,457 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7c4f765f2ff4411185dbdead6c030749, entries=150, sequenceid=483, filesize=12.0 K 2024-12-08T11:21:15,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/7484025662794de49172130bff05d34d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7484025662794de49172130bff05d34d 2024-12-08T11:21:15,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7484025662794de49172130bff05d34d, entries=150, sequenceid=483, filesize=12.0 K 2024-12-08T11:21:15,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/5a0ffba9e952415ea35a77346c05e9e3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/5a0ffba9e952415ea35a77346c05e9e3 2024-12-08T11:21:15,476 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,477 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-08T11:21:15,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:15,477 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/5a0ffba9e952415ea35a77346c05e9e3, entries=150, sequenceid=483, filesize=12.0 K 2024-12-08T11:21:15,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. as already flushing 2024-12-08T11:21:15,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:15,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] handler.RSProcedureHandler(58): pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=90 java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:15,478 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for eada905a64e5510a34d2159c0d8947a2 in 880ms, sequenceid=483, compaction requested=true 2024-12-08T11:21:15,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:15,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:15,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:15,478 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:15,478 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:15,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=90 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:15,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:15,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:15,478 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store eada905a64e5510a34d2159c0d8947a2:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:15,479 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:15,479 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52666 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:15,479 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:15,479 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/A is initiating minor compaction (all files) 2024-12-08T11:21:15,479 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/B is initiating minor compaction (all files) 2024-12-08T11:21:15,479 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/A in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:15,479 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/B in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:15,480 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3f59cc457f674d12ae8de1534ed5d910, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/49b563c8852a4a8ab5869c1b8ba67640, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/88c96ab8b8f54a5ab39799232e506c68, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7c4f765f2ff4411185dbdead6c030749] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=51.4 K 2024-12-08T11:21:15,480 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2e7ebcde551149b8bfe47873796f46ac, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/025c4692696e49a399ba7cc2d78d77b3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/bb61284ebd1247e483e8ca1df94c10f8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7484025662794de49172130bff05d34d] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=49.0 K 2024-12-08T11:21:15,480 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f59cc457f674d12ae8de1534ed5d910, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733656871187 2024-12-08T11:21:15,480 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e7ebcde551149b8bfe47873796f46ac, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733656871187 2024-12-08T11:21:15,480 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49b563c8852a4a8ab5869c1b8ba67640, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1733656872325 2024-12-08T11:21:15,480 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 025c4692696e49a399ba7cc2d78d77b3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1733656872326 2024-12-08T11:21:15,481 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 88c96ab8b8f54a5ab39799232e506c68, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1733656873453 2024-12-08T11:21:15,481 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 
bb61284ebd1247e483e8ca1df94c10f8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1733656873453 2024-12-08T11:21:15,481 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c4f765f2ff4411185dbdead6c030749, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1733656874596 2024-12-08T11:21:15,481 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7484025662794de49172130bff05d34d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1733656874596 2024-12-08T11:21:15,492 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#B#compaction#312 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:15,492 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/8cc4528adfce4a578cb1dacc37447336 is 50, key is test_row_0/B:col10/1733656874598/Put/seqid=0 2024-12-08T11:21:15,493 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#A#compaction#313 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:15,493 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/dfffe8db77f54c5797b527113618ea81 is 50, key is test_row_0/A:col10/1733656874598/Put/seqid=0 2024-12-08T11:21:15,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742184_1360 (size=13459) 2024-12-08T11:21:15,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742185_1361 (size=13459) 2024-12-08T11:21:15,505 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/dfffe8db77f54c5797b527113618ea81 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/dfffe8db77f54c5797b527113618ea81 2024-12-08T11:21:15,505 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/8cc4528adfce4a578cb1dacc37447336 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/8cc4528adfce4a578cb1dacc37447336 2024-12-08T11:21:15,513 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/B of eada905a64e5510a34d2159c0d8947a2 into 8cc4528adfce4a578cb1dacc37447336(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:15,513 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:15,513 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/B, priority=12, startTime=1733656875478; duration=0sec 2024-12-08T11:21:15,513 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:15,513 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:B 2024-12-08T11:21:15,514 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:15,515 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:15,515 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): eada905a64e5510a34d2159c0d8947a2/C is initiating minor compaction (all files) 2024-12-08T11:21:15,515 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/A of eada905a64e5510a34d2159c0d8947a2 into dfffe8db77f54c5797b527113618ea81(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:15,515 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of eada905a64e5510a34d2159c0d8947a2/C in TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:15,515 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:15,515 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/A, priority=12, startTime=1733656875478; duration=0sec 2024-12-08T11:21:15,515 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/4fa0c0a56abc403f8748a791cfd6668b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2bde2118ad0a42f2936468664602a11a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/64756b1b333249068d25ee75a31a9e54, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/5a0ffba9e952415ea35a77346c05e9e3] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp, totalSize=49.0 K 2024-12-08T11:21:15,515 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:15,515 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:A 2024-12-08T11:21:15,516 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fa0c0a56abc403f8748a791cfd6668b, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=430, earliestPutTs=1733656871187 2024-12-08T11:21:15,516 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bde2118ad0a42f2936468664602a11a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=453, earliestPutTs=1733656872326 2024-12-08T11:21:15,516 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 64756b1b333249068d25ee75a31a9e54, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=469, earliestPutTs=1733656873453 2024-12-08T11:21:15,517 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a0ffba9e952415ea35a77346c05e9e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=483, earliestPutTs=1733656874596 2024-12-08T11:21:15,526 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): eada905a64e5510a34d2159c0d8947a2#C#compaction#314 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:15,527 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/ee91f68625104b0d9a6879b15edd8be1 is 50, key is test_row_0/C:col10/1733656874598/Put/seqid=0 2024-12-08T11:21:15,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742186_1362 (size=13459) 2024-12-08T11:21:15,537 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/ee91f68625104b0d9a6879b15edd8be1 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/ee91f68625104b0d9a6879b15edd8be1 2024-12-08T11:21:15,542 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in eada905a64e5510a34d2159c0d8947a2/C of eada905a64e5510a34d2159c0d8947a2 into ee91f68625104b0d9a6879b15edd8be1(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:15,542 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:15,542 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2., storeName=eada905a64e5510a34d2159c0d8947a2/C, priority=12, startTime=1733656875478; duration=0sec 2024-12-08T11:21:15,543 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:15,543 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: eada905a64e5510a34d2159c0d8947a2:C 2024-12-08T11:21:15,629 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:15,630 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-12-08T11:21:15,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:15,630 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T11:21:15,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:15,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:15,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:15,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:15,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:15,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:15,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/2fc77d29a5bb45938d11d71665fd3f87 is 50, key is test_row_0/A:col10/1733656874623/Put/seqid=0 2024-12-08T11:21:15,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742187_1363 (size=12301) 2024-12-08T11:21:15,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:15,739 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
as already flushing 2024-12-08T11:21:15,745 DEBUG [Thread-1176 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a569490 to 127.0.0.1:63801 2024-12-08T11:21:15,745 DEBUG [Thread-1176 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,745 DEBUG [Thread-1187 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fef31f8 to 127.0.0.1:63801 2024-12-08T11:21:15,745 DEBUG [Thread-1193 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:63801 2024-12-08T11:21:15,745 DEBUG [Thread-1193 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,745 DEBUG [Thread-1187 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,746 DEBUG [Thread-1182 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:63801 2024-12-08T11:21:15,746 DEBUG [Thread-1182 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,746 DEBUG [Thread-1191 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:63801 2024-12-08T11:21:15,747 DEBUG [Thread-1191 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,747 DEBUG [Thread-1185 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e9ae050 to 127.0.0.1:63801 2024-12-08T11:21:15,747 DEBUG [Thread-1185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,748 DEBUG [Thread-1178 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6862e3ce to 127.0.0.1:63801 2024-12-08T11:21:15,748 DEBUG [Thread-1180 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d296fed to 127.0.0.1:63801 2024-12-08T11:21:15,748 DEBUG [Thread-1178 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,748 DEBUG [Thread-1180 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,751 DEBUG [Thread-1174 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53bfce45 to 127.0.0.1:63801 2024-12-08T11:21:15,751 DEBUG [Thread-1174 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,753 DEBUG [Thread-1189 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0eb04aeb to 127.0.0.1:63801 2024-12-08T11:21:15,753 DEBUG [Thread-1189 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:15,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T11:21:16,054 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/2fc77d29a5bb45938d11d71665fd3f87 2024-12-08T11:21:16,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/7be11b8a40b8401e9d277b21f2834f7c is 50, key is test_row_0/B:col10/1733656874623/Put/seqid=0 2024-12-08T11:21:16,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742188_1364 (size=12301) 2024-12-08T11:21:16,469 
INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/7be11b8a40b8401e9d277b21f2834f7c 2024-12-08T11:21:16,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/cbe89d05109a490e8296799f22873de9 is 50, key is test_row_0/C:col10/1733656874623/Put/seqid=0 2024-12-08T11:21:16,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742189_1365 (size=12301) 2024-12-08T11:21:16,482 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/cbe89d05109a490e8296799f22873de9 2024-12-08T11:21:16,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/2fc77d29a5bb45938d11d71665fd3f87 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2fc77d29a5bb45938d11d71665fd3f87 2024-12-08T11:21:16,491 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2fc77d29a5bb45938d11d71665fd3f87, entries=150, sequenceid=508, filesize=12.0 K 2024-12-08T11:21:16,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/7be11b8a40b8401e9d277b21f2834f7c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7be11b8a40b8401e9d277b21f2834f7c 2024-12-08T11:21:16,495 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7be11b8a40b8401e9d277b21f2834f7c, entries=150, sequenceid=508, filesize=12.0 K 2024-12-08T11:21:16,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/cbe89d05109a490e8296799f22873de9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/cbe89d05109a490e8296799f22873de9 2024-12-08T11:21:16,499 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/cbe89d05109a490e8296799f22873de9, entries=150, sequenceid=508, filesize=12.0 K 2024-12-08T11:21:16,500 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for eada905a64e5510a34d2159c0d8947a2 in 870ms, sequenceid=508, compaction requested=false 2024-12-08T11:21:16,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:16,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:16,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-08T11:21:16,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-08T11:21:16,503 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-08T11:21:16,503 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7900 sec 2024-12-08T11:21:16,504 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.7950 sec 2024-12-08T11:21:16,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-08T11:21:16,820 INFO [Thread-1184 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 78 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6126 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6239 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5992 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6271 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6138 2024-12-08T11:21:16,820 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T11:21:16,820 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T11:21:16,820 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x783a99f7 to 127.0.0.1:63801 2024-12-08T11:21:16,820 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:16,821 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-08T11:21:16,821 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T11:21:16,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:16,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T11:21:16,824 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656876824"}]},"ts":"1733656876824"} 2024-12-08T11:21:16,825 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T11:21:16,827 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T11:21:16,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T11:21:16,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=eada905a64e5510a34d2159c0d8947a2, UNASSIGN}] 2024-12-08T11:21:16,829 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=eada905a64e5510a34d2159c0d8947a2, UNASSIGN 2024-12-08T11:21:16,829 INFO [PEWorker-5 {}] 
assignment.RegionStateStore(202): pid=93 updating hbase:meta row=eada905a64e5510a34d2159c0d8947a2, regionState=CLOSING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:21:16,830 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T11:21:16,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; CloseRegionProcedure eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:21:16,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T11:21:16,981 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:16,982 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(124): Close eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1681): Closing eada905a64e5510a34d2159c0d8947a2, disabling compactions & flushes 2024-12-08T11:21:16,982 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. after waiting 0 ms 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:16,982 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(2837): Flushing eada905a64e5510a34d2159c0d8947a2 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=A 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=B 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactingMemStore(205): FLUSHING TO DISK eada905a64e5510a34d2159c0d8947a2, store=C 2024-12-08T11:21:16,982 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:16,987 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/a42d0d2f00804c90a1c2e25ea50c9b86 is 50, key is test_row_0/A:col10/1733656875750/Put/seqid=0 2024-12-08T11:21:16,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742190_1366 (size=12301) 2024-12-08T11:21:17,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T11:21:17,391 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/a42d0d2f00804c90a1c2e25ea50c9b86 2024-12-08T11:21:17,397 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/18aadcea6c4d4cfda914616cb2a68983 is 50, key is test_row_0/B:col10/1733656875750/Put/seqid=0 2024-12-08T11:21:17,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742191_1367 (size=12301) 2024-12-08T11:21:17,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T11:21:17,801 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 
{event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/18aadcea6c4d4cfda914616cb2a68983 2024-12-08T11:21:17,808 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/77222df8a81d4e5997560406a3a9bc13 is 50, key is test_row_0/C:col10/1733656875750/Put/seqid=0 2024-12-08T11:21:17,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742192_1368 (size=12301) 2024-12-08T11:21:17,817 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=522 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/77222df8a81d4e5997560406a3a9bc13 2024-12-08T11:21:17,821 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/A/a42d0d2f00804c90a1c2e25ea50c9b86 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a42d0d2f00804c90a1c2e25ea50c9b86 2024-12-08T11:21:17,825 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a42d0d2f00804c90a1c2e25ea50c9b86, entries=150, sequenceid=522, filesize=12.0 K 2024-12-08T11:21:17,825 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/B/18aadcea6c4d4cfda914616cb2a68983 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/18aadcea6c4d4cfda914616cb2a68983 2024-12-08T11:21:17,830 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/18aadcea6c4d4cfda914616cb2a68983, entries=150, sequenceid=522, filesize=12.0 K 2024-12-08T11:21:17,831 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/.tmp/C/77222df8a81d4e5997560406a3a9bc13 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/77222df8a81d4e5997560406a3a9bc13 2024-12-08T11:21:17,834 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/77222df8a81d4e5997560406a3a9bc13, entries=150, sequenceid=522, filesize=12.0 K 2024-12-08T11:21:17,835 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for eada905a64e5510a34d2159c0d8947a2 in 853ms, sequenceid=522, compaction requested=true 2024-12-08T11:21:17,835 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7f53e4716b38485f842bbd9f5a710ad4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/142c0f5788934c6aa5cdb2776cd3d2b5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8ea1da108578496e8e264d31f4d827f6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/5729e124e0bf493d9fc6654d2e103c18, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2a9b541f4bb14b06b7d8ceaf4fdeed45, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b6c0f6f49b904893988de6b8deb25744, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0f03918ea57146fa9a121f14e3a713c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/387ffd1f8a474f948c7a4def6ef89bdc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/051f22f19d6243b0a53fd07e016734d5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/fe302cdf557446eca0b02a9c726bcf50, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8f0c97a82f0c4aad9112f1a1934981e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/03581e7f97b34f02adc4f5bd5069989e, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/d55461b4353346768beed6a22f4b283b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1d6a09c6c6eb49cf95e08b0dacee6daa, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b362ea4ffe854029beae9c054e6baac2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3634c76c265e4809b63f327ba4830343, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3738ac535aa242d5bd41c82cbef6dbd8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1c336c78407c4397b700c03714f5acf6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/174f7e4e938f46b28b779895580e2028, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/51949c2a53df4b32a620260e113da4d5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6795e4160db945e6ad8c15fbbf86a5b7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6b2b712c3adb47d08ab75f28a600069f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3ff337b83ff1410b89b017480440a416, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/073e1a1256b6448eb1e14aa3cdb0b3df, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0213f823f4474070b6ecb7c76f1e3f10, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/aef13fc4a6074ee1a20f7edb5b1ffaa8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/ecf43a40a1744904951e945baf7a1a4f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/cb336533bbd448e9af23c7d742823c25, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a8f39d3ee9814ddd9b203145ef34878c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3f59cc457f674d12ae8de1534ed5d910, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/87a6e5a268324e789f65375d628339d9, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/49b563c8852a4a8ab5869c1b8ba67640, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/88c96ab8b8f54a5ab39799232e506c68, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7c4f765f2ff4411185dbdead6c030749] to archive 2024-12-08T11:21:17,836 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:21:17,838 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7f53e4716b38485f842bbd9f5a710ad4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7f53e4716b38485f842bbd9f5a710ad4 2024-12-08T11:21:17,839 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/142c0f5788934c6aa5cdb2776cd3d2b5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/142c0f5788934c6aa5cdb2776cd3d2b5 2024-12-08T11:21:17,839 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8ea1da108578496e8e264d31f4d827f6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8ea1da108578496e8e264d31f4d827f6 2024-12-08T11:21:17,840 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/5729e124e0bf493d9fc6654d2e103c18 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/5729e124e0bf493d9fc6654d2e103c18 2024-12-08T11:21:17,841 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2a9b541f4bb14b06b7d8ceaf4fdeed45 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2a9b541f4bb14b06b7d8ceaf4fdeed45 2024-12-08T11:21:17,842 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b6c0f6f49b904893988de6b8deb25744 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b6c0f6f49b904893988de6b8deb25744 2024-12-08T11:21:17,843 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0f03918ea57146fa9a121f14e3a713c8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0f03918ea57146fa9a121f14e3a713c8 2024-12-08T11:21:17,844 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/387ffd1f8a474f948c7a4def6ef89bdc to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/387ffd1f8a474f948c7a4def6ef89bdc 2024-12-08T11:21:17,844 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/051f22f19d6243b0a53fd07e016734d5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/051f22f19d6243b0a53fd07e016734d5 2024-12-08T11:21:17,845 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/fe302cdf557446eca0b02a9c726bcf50 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/fe302cdf557446eca0b02a9c726bcf50 2024-12-08T11:21:17,846 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8f0c97a82f0c4aad9112f1a1934981e8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/8f0c97a82f0c4aad9112f1a1934981e8 2024-12-08T11:21:17,847 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/03581e7f97b34f02adc4f5bd5069989e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/03581e7f97b34f02adc4f5bd5069989e 2024-12-08T11:21:17,847 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/d55461b4353346768beed6a22f4b283b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/d55461b4353346768beed6a22f4b283b 2024-12-08T11:21:17,848 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1d6a09c6c6eb49cf95e08b0dacee6daa to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1d6a09c6c6eb49cf95e08b0dacee6daa 2024-12-08T11:21:17,849 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b362ea4ffe854029beae9c054e6baac2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/b362ea4ffe854029beae9c054e6baac2 2024-12-08T11:21:17,850 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3634c76c265e4809b63f327ba4830343 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3634c76c265e4809b63f327ba4830343 2024-12-08T11:21:17,851 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3738ac535aa242d5bd41c82cbef6dbd8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3738ac535aa242d5bd41c82cbef6dbd8 2024-12-08T11:21:17,852 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1c336c78407c4397b700c03714f5acf6 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/1c336c78407c4397b700c03714f5acf6 2024-12-08T11:21:17,853 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/174f7e4e938f46b28b779895580e2028 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/174f7e4e938f46b28b779895580e2028 2024-12-08T11:21:17,854 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/51949c2a53df4b32a620260e113da4d5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/51949c2a53df4b32a620260e113da4d5 2024-12-08T11:21:17,855 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6795e4160db945e6ad8c15fbbf86a5b7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6795e4160db945e6ad8c15fbbf86a5b7 2024-12-08T11:21:17,856 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6b2b712c3adb47d08ab75f28a600069f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/6b2b712c3adb47d08ab75f28a600069f 2024-12-08T11:21:17,857 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3ff337b83ff1410b89b017480440a416 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3ff337b83ff1410b89b017480440a416 2024-12-08T11:21:17,858 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/073e1a1256b6448eb1e14aa3cdb0b3df to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/073e1a1256b6448eb1e14aa3cdb0b3df 2024-12-08T11:21:17,859 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0213f823f4474070b6ecb7c76f1e3f10 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/0213f823f4474070b6ecb7c76f1e3f10 2024-12-08T11:21:17,860 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/aef13fc4a6074ee1a20f7edb5b1ffaa8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/aef13fc4a6074ee1a20f7edb5b1ffaa8 2024-12-08T11:21:17,860 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/ecf43a40a1744904951e945baf7a1a4f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/ecf43a40a1744904951e945baf7a1a4f 2024-12-08T11:21:17,862 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/cb336533bbd448e9af23c7d742823c25 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/cb336533bbd448e9af23c7d742823c25 2024-12-08T11:21:17,863 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a8f39d3ee9814ddd9b203145ef34878c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a8f39d3ee9814ddd9b203145ef34878c 2024-12-08T11:21:17,864 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3f59cc457f674d12ae8de1534ed5d910 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/3f59cc457f674d12ae8de1534ed5d910 2024-12-08T11:21:17,865 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/87a6e5a268324e789f65375d628339d9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/87a6e5a268324e789f65375d628339d9 2024-12-08T11:21:17,865 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/49b563c8852a4a8ab5869c1b8ba67640 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/49b563c8852a4a8ab5869c1b8ba67640 2024-12-08T11:21:17,866 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/88c96ab8b8f54a5ab39799232e506c68 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/88c96ab8b8f54a5ab39799232e506c68 2024-12-08T11:21:17,867 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7c4f765f2ff4411185dbdead6c030749 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/7c4f765f2ff4411185dbdead6c030749 2024-12-08T11:21:17,869 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ccd5cf83bdb44288b2274fa8fc4d81f8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/dd4224e3816f42648f8876aace388fe8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/632dd9338341439ba4fac1596a72e0c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/082040eae6c74b78a6da0e6035580111, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/cd106c59d8914b58aedac67496162e4e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/20287007003c40f38ef0c48d45d1ac8f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/4c93fcc9833e4f5baa9f6c7be9f19221, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/5d93b9f2438e41f9b06920d79b6a8b84, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2a72c8cea8af4372888382677cba2b77, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c986c4ff224f4b6383fa35526e6d874c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/1a599df37c1f47d2938d3229afbb3f49, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2055b88e90534c0fb79ec2580341337a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a868baabb5a4542a2cb9c410bec1f8e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ab8d3fd586e4480ea95262be52d01b67, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ff23fc560b6d46bab07d9e2e12f34704, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f5f3e92d5e0645048fca950afa459d8a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c9ac20cfd42a440590008eeff4339fca, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/b7ceb633cdbc46caa950409484350d71, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2f9905de78044c3b8c79c659b10ddf0f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f7ad68f2e7be4fe6839954453152affe, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/0a63d8f91f9543898efa97161d1a0acd, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2ba16c5c4e904db5a15180142ef2f70e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/a71df23039a44d2399111f2c191a1dc1, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/8a8da24d8eac410e867b1838b45eaa2e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/50f1c1ba973f429d80f827fd830dbffc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/94cac8cf08f1450aac2409204c868790, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/444c2d7e6d184257a2a85e9076887b8a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/55ffddfde2c54964bf40288dab4d9f88, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/35cccdcd9afd45deb23bbbb661793ac1, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2e7ebcde551149b8bfe47873796f46ac, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a928359922647a291eb57b334720abd, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/025c4692696e49a399ba7cc2d78d77b3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/bb61284ebd1247e483e8ca1df94c10f8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7484025662794de49172130bff05d34d] to archive 2024-12-08T11:21:17,870 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:21:17,871 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ccd5cf83bdb44288b2274fa8fc4d81f8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ccd5cf83bdb44288b2274fa8fc4d81f8 2024-12-08T11:21:17,872 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/dd4224e3816f42648f8876aace388fe8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/dd4224e3816f42648f8876aace388fe8 2024-12-08T11:21:17,873 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/632dd9338341439ba4fac1596a72e0c8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/632dd9338341439ba4fac1596a72e0c8 2024-12-08T11:21:17,874 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/082040eae6c74b78a6da0e6035580111 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/082040eae6c74b78a6da0e6035580111 2024-12-08T11:21:17,875 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/cd106c59d8914b58aedac67496162e4e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/cd106c59d8914b58aedac67496162e4e 2024-12-08T11:21:17,876 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/20287007003c40f38ef0c48d45d1ac8f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/20287007003c40f38ef0c48d45d1ac8f 2024-12-08T11:21:17,877 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/4c93fcc9833e4f5baa9f6c7be9f19221 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/4c93fcc9833e4f5baa9f6c7be9f19221 2024-12-08T11:21:17,878 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/5d93b9f2438e41f9b06920d79b6a8b84 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/5d93b9f2438e41f9b06920d79b6a8b84 2024-12-08T11:21:17,879 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2a72c8cea8af4372888382677cba2b77 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2a72c8cea8af4372888382677cba2b77 2024-12-08T11:21:17,880 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c986c4ff224f4b6383fa35526e6d874c to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c986c4ff224f4b6383fa35526e6d874c 2024-12-08T11:21:17,881 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/1a599df37c1f47d2938d3229afbb3f49 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/1a599df37c1f47d2938d3229afbb3f49 2024-12-08T11:21:17,882 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2055b88e90534c0fb79ec2580341337a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2055b88e90534c0fb79ec2580341337a 2024-12-08T11:21:17,883 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a868baabb5a4542a2cb9c410bec1f8e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a868baabb5a4542a2cb9c410bec1f8e 2024-12-08T11:21:17,884 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ab8d3fd586e4480ea95262be52d01b67 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ab8d3fd586e4480ea95262be52d01b67 2024-12-08T11:21:17,885 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ff23fc560b6d46bab07d9e2e12f34704 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/ff23fc560b6d46bab07d9e2e12f34704 2024-12-08T11:21:17,886 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f5f3e92d5e0645048fca950afa459d8a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f5f3e92d5e0645048fca950afa459d8a 2024-12-08T11:21:17,887 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c9ac20cfd42a440590008eeff4339fca to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/c9ac20cfd42a440590008eeff4339fca 2024-12-08T11:21:17,888 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/b7ceb633cdbc46caa950409484350d71 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/b7ceb633cdbc46caa950409484350d71 2024-12-08T11:21:17,889 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2f9905de78044c3b8c79c659b10ddf0f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2f9905de78044c3b8c79c659b10ddf0f 2024-12-08T11:21:17,890 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f7ad68f2e7be4fe6839954453152affe to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/f7ad68f2e7be4fe6839954453152affe 2024-12-08T11:21:17,891 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/0a63d8f91f9543898efa97161d1a0acd to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/0a63d8f91f9543898efa97161d1a0acd 2024-12-08T11:21:17,892 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2ba16c5c4e904db5a15180142ef2f70e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2ba16c5c4e904db5a15180142ef2f70e 2024-12-08T11:21:17,893 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/a71df23039a44d2399111f2c191a1dc1 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/a71df23039a44d2399111f2c191a1dc1 2024-12-08T11:21:17,894 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/8a8da24d8eac410e867b1838b45eaa2e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/8a8da24d8eac410e867b1838b45eaa2e 2024-12-08T11:21:17,894 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/50f1c1ba973f429d80f827fd830dbffc to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/50f1c1ba973f429d80f827fd830dbffc 2024-12-08T11:21:17,896 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/94cac8cf08f1450aac2409204c868790 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/94cac8cf08f1450aac2409204c868790 2024-12-08T11:21:17,897 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/444c2d7e6d184257a2a85e9076887b8a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/444c2d7e6d184257a2a85e9076887b8a 2024-12-08T11:21:17,898 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/55ffddfde2c54964bf40288dab4d9f88 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/55ffddfde2c54964bf40288dab4d9f88 2024-12-08T11:21:17,899 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/35cccdcd9afd45deb23bbbb661793ac1 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/35cccdcd9afd45deb23bbbb661793ac1 2024-12-08T11:21:17,900 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2e7ebcde551149b8bfe47873796f46ac to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/2e7ebcde551149b8bfe47873796f46ac 2024-12-08T11:21:17,901 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a928359922647a291eb57b334720abd to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/9a928359922647a291eb57b334720abd 2024-12-08T11:21:17,902 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/025c4692696e49a399ba7cc2d78d77b3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/025c4692696e49a399ba7cc2d78d77b3 2024-12-08T11:21:17,903 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/bb61284ebd1247e483e8ca1df94c10f8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/bb61284ebd1247e483e8ca1df94c10f8 2024-12-08T11:21:17,904 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7484025662794de49172130bff05d34d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7484025662794de49172130bff05d34d 2024-12-08T11:21:17,905 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/81424e84f7414643b08c2280811f795f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/a273155288754b1e8617d7e3bda21e17, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/87fdbc49f546482884d3813dc4fca7ed, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/415ce065b02f420b9c845b1d99000e01, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/56b8f7899c5d43f4b3c764d036b01430, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bb77684d12ca4f38839ad1fbec5b452b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/329f3807e00f4051ae458dadb636f962, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c1293455a7c54f06880deb42c1748bbb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/fe1be07d01b1437a8ce5426370c4861b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c0908b11886e42a78b86575337928136, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/406753c5020742288b1c674d510fb042, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f431f0f5f1a247fb9567dd32d3a84c4b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ae055deeeb94ffb848805da5d8b852f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/edf31cdc982d4cdaa1d77845ade33a3e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f9be1a3290604675bfbc37ae6bd2546d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/6e05fb556cf14877b733186360c3e4b4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/972dd010928a40dbbeee6ee69b67448f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/7413f38c72f147ae9c985f729c41b7c3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2b26c3081e724648b9de0c42f728c66f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/093cede8b6b543179836b824fd43d43a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/412e3d9dcb114965a2a52f5a45eeb4c1, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/b9a4213a31e644c0b74d2243a5b35e96, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bd11c6db1b394719bc8c719d03084587, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/0d85b2d2d7824095bfeb2971bd09e082, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/3cbb89e1ba7242b9855372e1f098377b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/95b238f4838a4f39ac3980908f6ec412, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/077af1a19c014929b92dffbfd88f4041, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/374fa245d93a4714bae09ba77b9ff98a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ebf2ab2f7f14fcbae6b6d4ce632eb2b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/4fa0c0a56abc403f8748a791cfd6668b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/9feedd7451c9492183dbbd47ad2e72bc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2bde2118ad0a42f2936468664602a11a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/64756b1b333249068d25ee75a31a9e54, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/5a0ffba9e952415ea35a77346c05e9e3] to archive 2024-12-08T11:21:17,906 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T11:21:17,908 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/81424e84f7414643b08c2280811f795f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/81424e84f7414643b08c2280811f795f 2024-12-08T11:21:17,909 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/a273155288754b1e8617d7e3bda21e17 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/a273155288754b1e8617d7e3bda21e17 2024-12-08T11:21:17,910 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/87fdbc49f546482884d3813dc4fca7ed to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/87fdbc49f546482884d3813dc4fca7ed 2024-12-08T11:21:17,911 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/415ce065b02f420b9c845b1d99000e01 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/415ce065b02f420b9c845b1d99000e01 2024-12-08T11:21:17,912 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/56b8f7899c5d43f4b3c764d036b01430 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/56b8f7899c5d43f4b3c764d036b01430 2024-12-08T11:21:17,913 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bb77684d12ca4f38839ad1fbec5b452b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bb77684d12ca4f38839ad1fbec5b452b 2024-12-08T11:21:17,913 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/329f3807e00f4051ae458dadb636f962 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/329f3807e00f4051ae458dadb636f962 2024-12-08T11:21:17,914 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c1293455a7c54f06880deb42c1748bbb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c1293455a7c54f06880deb42c1748bbb 2024-12-08T11:21:17,915 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/fe1be07d01b1437a8ce5426370c4861b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/fe1be07d01b1437a8ce5426370c4861b 2024-12-08T11:21:17,916 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c0908b11886e42a78b86575337928136 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/c0908b11886e42a78b86575337928136 2024-12-08T11:21:17,917 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/406753c5020742288b1c674d510fb042 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/406753c5020742288b1c674d510fb042 2024-12-08T11:21:17,917 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f431f0f5f1a247fb9567dd32d3a84c4b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f431f0f5f1a247fb9567dd32d3a84c4b 2024-12-08T11:21:17,918 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ae055deeeb94ffb848805da5d8b852f to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ae055deeeb94ffb848805da5d8b852f 2024-12-08T11:21:17,919 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/edf31cdc982d4cdaa1d77845ade33a3e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/edf31cdc982d4cdaa1d77845ade33a3e 2024-12-08T11:21:17,920 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f9be1a3290604675bfbc37ae6bd2546d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/f9be1a3290604675bfbc37ae6bd2546d 2024-12-08T11:21:17,921 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/6e05fb556cf14877b733186360c3e4b4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/6e05fb556cf14877b733186360c3e4b4 2024-12-08T11:21:17,921 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/972dd010928a40dbbeee6ee69b67448f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/972dd010928a40dbbeee6ee69b67448f 2024-12-08T11:21:17,922 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/7413f38c72f147ae9c985f729c41b7c3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/7413f38c72f147ae9c985f729c41b7c3 2024-12-08T11:21:17,923 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2b26c3081e724648b9de0c42f728c66f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2b26c3081e724648b9de0c42f728c66f 2024-12-08T11:21:17,924 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/093cede8b6b543179836b824fd43d43a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/093cede8b6b543179836b824fd43d43a 2024-12-08T11:21:17,924 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/412e3d9dcb114965a2a52f5a45eeb4c1 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/412e3d9dcb114965a2a52f5a45eeb4c1 2024-12-08T11:21:17,925 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/b9a4213a31e644c0b74d2243a5b35e96 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/b9a4213a31e644c0b74d2243a5b35e96 2024-12-08T11:21:17,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T11:21:17,926 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bd11c6db1b394719bc8c719d03084587 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/bd11c6db1b394719bc8c719d03084587 2024-12-08T11:21:17,927 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/0d85b2d2d7824095bfeb2971bd09e082 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/0d85b2d2d7824095bfeb2971bd09e082 2024-12-08T11:21:17,928 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/3cbb89e1ba7242b9855372e1f098377b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/3cbb89e1ba7242b9855372e1f098377b 2024-12-08T11:21:17,929 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/95b238f4838a4f39ac3980908f6ec412 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/95b238f4838a4f39ac3980908f6ec412 2024-12-08T11:21:17,929 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/077af1a19c014929b92dffbfd88f4041 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/077af1a19c014929b92dffbfd88f4041 2024-12-08T11:21:17,930 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/374fa245d93a4714bae09ba77b9ff98a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/374fa245d93a4714bae09ba77b9ff98a 2024-12-08T11:21:17,931 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ebf2ab2f7f14fcbae6b6d4ce632eb2b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2ebf2ab2f7f14fcbae6b6d4ce632eb2b 2024-12-08T11:21:17,932 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/4fa0c0a56abc403f8748a791cfd6668b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/4fa0c0a56abc403f8748a791cfd6668b 2024-12-08T11:21:17,933 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/9feedd7451c9492183dbbd47ad2e72bc to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/9feedd7451c9492183dbbd47ad2e72bc 2024-12-08T11:21:17,933 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2bde2118ad0a42f2936468664602a11a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/2bde2118ad0a42f2936468664602a11a 2024-12-08T11:21:17,934 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/64756b1b333249068d25ee75a31a9e54 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/64756b1b333249068d25ee75a31a9e54 2024-12-08T11:21:17,935 DEBUG [StoreCloser-TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/5a0ffba9e952415ea35a77346c05e9e3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/5a0ffba9e952415ea35a77346c05e9e3 2024-12-08T11:21:17,938 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/recovered.edits/525.seqid, newMaxSeqId=525, maxSeqId=1 2024-12-08T11:21:17,939 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2. 
2024-12-08T11:21:17,939 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1635): Region close journal for eada905a64e5510a34d2159c0d8947a2: 2024-12-08T11:21:17,940 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(170): Closed eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:17,941 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=eada905a64e5510a34d2159c0d8947a2, regionState=CLOSED 2024-12-08T11:21:17,942 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-08T11:21:17,942 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseRegionProcedure eada905a64e5510a34d2159c0d8947a2, server=355ef6e50110,46083,1733656795491 in 1.1110 sec 2024-12-08T11:21:17,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-08T11:21:17,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=eada905a64e5510a34d2159c0d8947a2, UNASSIGN in 1.1140 sec 2024-12-08T11:21:17,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-12-08T11:21:17,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1160 sec 2024-12-08T11:21:17,945 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656877945"}]},"ts":"1733656877945"} 2024-12-08T11:21:17,946 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T11:21:17,948 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T11:21:17,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.1270 sec 2024-12-08T11:21:18,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-08T11:21:18,927 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-08T11:21:18,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T11:21:18,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:18,929 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:18,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-08T11:21:18,930 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:18,932 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:18,934 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/recovered.edits] 2024-12-08T11:21:18,936 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2fc77d29a5bb45938d11d71665fd3f87 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/2fc77d29a5bb45938d11d71665fd3f87 2024-12-08T11:21:18,937 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a42d0d2f00804c90a1c2e25ea50c9b86 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/a42d0d2f00804c90a1c2e25ea50c9b86 2024-12-08T11:21:18,938 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/dfffe8db77f54c5797b527113618ea81 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/A/dfffe8db77f54c5797b527113618ea81 2024-12-08T11:21:18,940 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/18aadcea6c4d4cfda914616cb2a68983 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/18aadcea6c4d4cfda914616cb2a68983 2024-12-08T11:21:18,941 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7be11b8a40b8401e9d277b21f2834f7c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/7be11b8a40b8401e9d277b21f2834f7c 2024-12-08T11:21:18,942 
DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/8cc4528adfce4a578cb1dacc37447336 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/B/8cc4528adfce4a578cb1dacc37447336 2024-12-08T11:21:18,943 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/77222df8a81d4e5997560406a3a9bc13 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/77222df8a81d4e5997560406a3a9bc13 2024-12-08T11:21:18,944 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/cbe89d05109a490e8296799f22873de9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/cbe89d05109a490e8296799f22873de9 2024-12-08T11:21:18,945 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/ee91f68625104b0d9a6879b15edd8be1 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/C/ee91f68625104b0d9a6879b15edd8be1 2024-12-08T11:21:18,948 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/recovered.edits/525.seqid to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2/recovered.edits/525.seqid 2024-12-08T11:21:18,948 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/eada905a64e5510a34d2159c0d8947a2 2024-12-08T11:21:18,948 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T11:21:18,950 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:18,953 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T11:21:18,956 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
2024-12-08T11:21:18,957 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:18,957 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T11:21:18,957 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733656878957"}]},"ts":"9223372036854775807"} 2024-12-08T11:21:18,959 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T11:21:18,959 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => eada905a64e5510a34d2159c0d8947a2, NAME => 'TestAcidGuarantees,,1733656853482.eada905a64e5510a34d2159c0d8947a2.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T11:21:18,959 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-08T11:21:18,959 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733656878959"}]},"ts":"9223372036854775807"} 2024-12-08T11:21:18,961 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T11:21:18,962 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:18,963 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 35 msec 2024-12-08T11:21:19,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-08T11:21:19,031 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 95 completed 2024-12-08T11:21:19,040 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=240 (was 243), OpenFileDescriptor=453 (was 456), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=498 (was 515), ProcessCount=11 (was 11), AvailableMemoryMB=6739 (was 7135) 2024-12-08T11:21:19,049 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=240, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=498, ProcessCount=11, AvailableMemoryMB=6739 2024-12-08T11:21:19,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-08T11:21:19,051 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:21:19,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:19,052 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T11:21:19,052 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:19,052 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 96 2024-12-08T11:21:19,053 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T11:21:19,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-08T11:21:19,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742193_1369 (size=963) 2024-12-08T11:21:19,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-08T11:21:19,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-08T11:21:19,460 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c 2024-12-08T11:21:19,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742194_1370 (size=53) 2024-12-08T11:21:19,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-08T11:21:19,866 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:21:19,866 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 548fe72b592216ea9ca6f0d238246b28, disabling compactions & flushes 2024-12-08T11:21:19,866 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:19,866 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:19,866 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. after waiting 0 ms 2024-12-08T11:21:19,866 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:19,866 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:19,866 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:19,867 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T11:21:19,867 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733656879867"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733656879867"}]},"ts":"1733656879867"} 2024-12-08T11:21:19,868 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T11:21:19,869 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T11:21:19,869 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656879869"}]},"ts":"1733656879869"} 2024-12-08T11:21:19,870 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T11:21:19,874 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, ASSIGN}] 2024-12-08T11:21:19,874 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, ASSIGN 2024-12-08T11:21:19,875 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, ASSIGN; state=OFFLINE, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=false 2024-12-08T11:21:20,025 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=548fe72b592216ea9ca6f0d238246b28, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:21:20,027 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; OpenRegionProcedure 548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:21:20,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-08T11:21:20,178 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:20,181 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:20,181 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7285): Opening region: {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:21:20,181 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:20,181 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:21:20,181 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7327): checking encryption for 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:20,181 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(7330): checking classloading for 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:20,183 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:20,184 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:21:20,184 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 548fe72b592216ea9ca6f0d238246b28 columnFamilyName A 2024-12-08T11:21:20,184 DEBUG [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:20,184 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(327): Store=548fe72b592216ea9ca6f0d238246b28/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:21:20,184 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:20,185 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:21:20,185 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 548fe72b592216ea9ca6f0d238246b28 columnFamilyName B 2024-12-08T11:21:20,185 DEBUG [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:20,186 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(327): Store=548fe72b592216ea9ca6f0d238246b28/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:21:20,186 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:20,187 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:21:20,187 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 548fe72b592216ea9ca6f0d238246b28 columnFamilyName C 2024-12-08T11:21:20,187 DEBUG [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:20,187 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(327): Store=548fe72b592216ea9ca6f0d238246b28/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:21:20,187 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:20,188 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:20,188 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:20,189 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T11:21:20,190 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1085): writing seq id for 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:20,192 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T11:21:20,192 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1102): Opened 548fe72b592216ea9ca6f0d238246b28; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60755485, jitterRate=-0.09467272460460663}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:21:20,193 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegion(1001): Region open journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:20,193 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., pid=98, masterSystemTime=1733656880178 2024-12-08T11:21:20,195 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:20,195 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=98}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:20,195 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=548fe72b592216ea9ca6f0d238246b28, regionState=OPEN, openSeqNum=2, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:21:20,197 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-08T11:21:20,197 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; OpenRegionProcedure 548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 in 169 msec 2024-12-08T11:21:20,198 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-08T11:21:20,198 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, ASSIGN in 324 msec 2024-12-08T11:21:20,199 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T11:21:20,199 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656880199"}]},"ts":"1733656880199"} 2024-12-08T11:21:20,200 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T11:21:20,202 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T11:21:20,203 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1510 sec 2024-12-08T11:21:21,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=96 2024-12-08T11:21:21,157 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 96 completed 2024-12-08T11:21:21,159 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68ad882f to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f5b2180 2024-12-08T11:21:21,163 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34becda3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:21,164 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:21,166 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52310, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:21,167 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T11:21:21,167 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58066, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T11:21:21,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-08T11:21:21,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:21:21,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:21,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742195_1371 (size=999) 2024-12-08T11:21:21,580 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-08T11:21:21,580 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-08T11:21:21,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T11:21:21,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, REOPEN/MOVE}] 2024-12-08T11:21:21,584 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, REOPEN/MOVE 2024-12-08T11:21:21,584 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=548fe72b592216ea9ca6f0d238246b28, regionState=CLOSING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:21:21,585 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T11:21:21,585 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; CloseRegionProcedure 548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:21:21,736 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:21,737 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(124): Close 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:21,737 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T11:21:21,737 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1681): Closing 548fe72b592216ea9ca6f0d238246b28, disabling compactions & flushes 2024-12-08T11:21:21,737 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:21,737 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:21,737 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. after waiting 0 ms 2024-12-08T11:21:21,737 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:21,741 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-08T11:21:21,741 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:21,741 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegion(1635): Region close journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:21,741 WARN [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] regionserver.HRegionServer(3786): Not adding moved region record: 548fe72b592216ea9ca6f0d238246b28 to self. 2024-12-08T11:21:21,743 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=102}] handler.UnassignRegionHandler(170): Closed 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:21,743 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=548fe72b592216ea9ca6f0d238246b28, regionState=CLOSED 2024-12-08T11:21:21,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-08T11:21:21,745 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; CloseRegionProcedure 548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 in 159 msec 2024-12-08T11:21:21,745 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=100, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, REOPEN/MOVE; state=CLOSED, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=true 2024-12-08T11:21:21,896 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=548fe72b592216ea9ca6f0d238246b28, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:21:21,897 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=101, state=RUNNABLE; OpenRegionProcedure 548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:21:22,049 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,051 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:22,051 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:21:22,052 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,052 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:21:22,052 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,052 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,053 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,054 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:21:22,054 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 548fe72b592216ea9ca6f0d238246b28 columnFamilyName A 2024-12-08T11:21:22,055 DEBUG [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:22,056 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(327): Store=548fe72b592216ea9ca6f0d238246b28/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:21:22,056 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,057 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:21:22,057 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 548fe72b592216ea9ca6f0d238246b28 columnFamilyName B 2024-12-08T11:21:22,057 DEBUG [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:22,057 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(327): Store=548fe72b592216ea9ca6f0d238246b28/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:21:22,057 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,058 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:21:22,058 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 548fe72b592216ea9ca6f0d238246b28 columnFamilyName C 2024-12-08T11:21:22,058 DEBUG [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:22,059 INFO [StoreOpener-548fe72b592216ea9ca6f0d238246b28-1 {}] regionserver.HStore(327): Store=548fe72b592216ea9ca6f0d238246b28/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:21:22,059 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,059 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,060 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,061 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T11:21:22,063 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,063 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 548fe72b592216ea9ca6f0d238246b28; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72408077, jitterRate=0.07896442711353302}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:21:22,064 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:22,065 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., pid=103, masterSystemTime=1733656882048 2024-12-08T11:21:22,066 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,066 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:22,066 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=548fe72b592216ea9ca6f0d238246b28, regionState=OPEN, openSeqNum=5, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,068 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=101 2024-12-08T11:21:22,069 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=101, state=SUCCESS; OpenRegionProcedure 548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 in 170 msec 2024-12-08T11:21:22,070 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-08T11:21:22,070 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, REOPEN/MOVE in 486 msec 2024-12-08T11:21:22,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-08T11:21:22,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 489 msec 2024-12-08T11:21:22,073 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 903 msec 2024-12-08T11:21:22,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-08T11:21:22,076 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b976e1a to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df61dc9 2024-12-08T11:21:22,082 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fe71801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,083 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b82ba2a to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3637e4c6 2024-12-08T11:21:22,086 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f7d511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,087 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-12-08T11:21:22,090 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,091 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 
127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-12-08T11:21:22,094 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,095 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3dd5b441 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-12-08T11:21:22,098 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,099 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167a78b0 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31aea41b 2024-12-08T11:21:22,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3875c8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,104 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5aee939b to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e247aa1 2024-12-08T11:21:22,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@801ba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,107 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-12-08T11:21:22,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,113 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-12-08T11:21:22,117 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,118 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-12-08T11:21:22,121 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:22,124 DEBUG [hconnection-0x7d5fcc27-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,124 DEBUG [hconnection-0x668b1085-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,124 DEBUG [hconnection-0x79850ff0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,125 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52320, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,125 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52328, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,126 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,128 DEBUG [hconnection-0x2813abac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,129 DEBUG [hconnection-0x57428fec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,130 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52352, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,131 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52362, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,132 DEBUG [hconnection-0x7f48e85d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,132 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:22,133 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-08T11:21:22,134 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
2024-12-08T11:21:22,134 DEBUG [hconnection-0x4bb3c8a3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,135 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:22,136 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:22,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-08T11:21:22,137 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52380, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,137 DEBUG [hconnection-0x742f0a62-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,137 DEBUG [hconnection-0x111403b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:21:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:22,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:22,139 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52394, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,140 DEBUG [hconnection-0x3cadc092-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:22,141 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52402, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,142 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52412, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:22,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d2a98cee228e4c2898b0ff7262e53d0a_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656882135/Put/seqid=0 2024-12-08T11:21:22,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742196_1372 (size=12154) 2024-12-08T11:21:22,180 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:22,184 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d2a98cee228e4c2898b0ff7262e53d0a_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d2a98cee228e4c2898b0ff7262e53d0a_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:22,185 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/079b88cabe5446f29b1e18640e550808, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:22,185 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/079b88cabe5446f29b1e18640e550808 is 175, key is test_row_0/A:col10/1733656882135/Put/seqid=0 2024-12-08T11:21:22,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742197_1373 (size=30955) 2024-12-08T11:21:22,208 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656942202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656942203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656942208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656942209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656942209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-08T11:21:22,288 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-08T11:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,289 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656942310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656942310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656942312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656942312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,317 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656942313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-08T11:21:22,441 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,442 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-08T11:21:22,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:22,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,442 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656942511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656942516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656942518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656942518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656942519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,590 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/079b88cabe5446f29b1e18640e550808 2024-12-08T11:21:22,594 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-08T11:21:22,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:22,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:22,595 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/2237d0c3503249969f14aa8bcd783471 is 50, key is test_row_0/B:col10/1733656882135/Put/seqid=0 2024-12-08T11:21:22,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742198_1374 (size=12001) 2024-12-08T11:21:22,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-08T11:21:22,747 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,748 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-08T11:21:22,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:22,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:22,748 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,748 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656942817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656942820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656942824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656942824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:22,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656942824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,900 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:22,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-08T11:21:22,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:22,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:22,901 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:22,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:23,021 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/2237d0c3503249969f14aa8bcd783471 2024-12-08T11:21:23,053 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-08T11:21:23,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:23,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:23,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:23,054 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:23,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:23,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:23,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/d527115f083e4946b75eddb28594b022 is 50, key is test_row_0/C:col10/1733656882135/Put/seqid=0 2024-12-08T11:21:23,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742199_1375 (size=12001) 2024-12-08T11:21:23,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/d527115f083e4946b75eddb28594b022 2024-12-08T11:21:23,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/079b88cabe5446f29b1e18640e550808 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/079b88cabe5446f29b1e18640e550808 2024-12-08T11:21:23,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/079b88cabe5446f29b1e18640e550808, entries=150, sequenceid=15, filesize=30.2 K 2024-12-08T11:21:23,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/2237d0c3503249969f14aa8bcd783471 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/2237d0c3503249969f14aa8bcd783471 2024-12-08T11:21:23,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/2237d0c3503249969f14aa8bcd783471, entries=150, sequenceid=15, filesize=11.7 K 2024-12-08T11:21:23,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/d527115f083e4946b75eddb28594b022 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/d527115f083e4946b75eddb28594b022 2024-12-08T11:21:23,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/d527115f083e4946b75eddb28594b022, entries=150, sequenceid=15, filesize=11.7 K 2024-12-08T11:21:23,150 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 548fe72b592216ea9ca6f0d238246b28 in 1013ms, sequenceid=15, compaction requested=false 2024-12-08T11:21:23,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:23,207 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-08T11:21:23,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:23,208 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T11:21:23,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:23,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:23,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:23,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:23,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:23,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:23,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-08T11:21:23,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a1e559740f69414bb905f8252159299d_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656882208/Put/seqid=0 2024-12-08T11:21:23,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742200_1376 (size=12154) 2024-12-08T11:21:23,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:23,269 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208a1e559740f69414bb905f8252159299d_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a1e559740f69414bb905f8252159299d_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:23,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3cb5aabe19f843a28602a437dac28a8c, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:23,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3cb5aabe19f843a28602a437dac28a8c is 175, key is test_row_0/A:col10/1733656882208/Put/seqid=0 2024-12-08T11:21:23,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742201_1377 (size=30955) 2024-12-08T11:21:23,278 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3cb5aabe19f843a28602a437dac28a8c 2024-12-08T11:21:23,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/039670f712d043f4a090d189c801c263 is 50, key is test_row_0/B:col10/1733656882208/Put/seqid=0 2024-12-08T11:21:23,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742202_1378 (size=12001) 2024-12-08T11:21:23,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:23,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:23,341 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656943335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656943338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656943338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656943340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656943341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656943445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656943445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656943445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656943445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656943451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,517 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T11:21:23,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656943649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656943649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656943649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656943650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656943655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,696 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/039670f712d043f4a090d189c801c263 2024-12-08T11:21:23,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b4a385f573e2400bae7342dbf3168b05 is 50, key is test_row_0/C:col10/1733656882208/Put/seqid=0 2024-12-08T11:21:23,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742203_1379 (size=12001) 2024-12-08T11:21:23,829 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T11:21:23,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656943954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,958 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656943954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656943954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656943955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:23,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:23,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656943960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,110 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b4a385f573e2400bae7342dbf3168b05 2024-12-08T11:21:24,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3cb5aabe19f843a28602a437dac28a8c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3cb5aabe19f843a28602a437dac28a8c 2024-12-08T11:21:24,118 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3cb5aabe19f843a28602a437dac28a8c, entries=150, sequenceid=41, filesize=30.2 K 2024-12-08T11:21:24,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/039670f712d043f4a090d189c801c263 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/039670f712d043f4a090d189c801c263 2024-12-08T11:21:24,128 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/039670f712d043f4a090d189c801c263, entries=150, sequenceid=41, filesize=11.7 K 2024-12-08T11:21:24,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b4a385f573e2400bae7342dbf3168b05 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b4a385f573e2400bae7342dbf3168b05 2024-12-08T11:21:24,132 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b4a385f573e2400bae7342dbf3168b05, entries=150, sequenceid=41, filesize=11.7 K 2024-12-08T11:21:24,133 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 548fe72b592216ea9ca6f0d238246b28 in 926ms, sequenceid=41, compaction requested=false 2024-12-08T11:21:24,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:24,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:24,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-08T11:21:24,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-08T11:21:24,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-08T11:21:24,136 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0000 sec 2024-12-08T11:21:24,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 2.0040 sec 2024-12-08T11:21:24,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-08T11:21:24,241 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-08T11:21:24,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:24,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-08T11:21:24,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T11:21:24,244 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:24,245 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:24,245 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T11:21:24,396 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-08T11:21:24,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:24,397 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T11:21:24,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:24,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:24,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:24,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:24,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:24,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:24,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086224c657cdd34658a0b5bf1f4f7a9759_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656883337/Put/seqid=0 2024-12-08T11:21:24,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742204_1380 (size=12154) 2024-12-08T11:21:24,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:24,426 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086224c657cdd34658a0b5bf1f4f7a9759_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086224c657cdd34658a0b5bf1f4f7a9759_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:24,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/21fb16a060544f8d86ca817bdd824dbc, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:24,428 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/21fb16a060544f8d86ca817bdd824dbc is 175, key is test_row_0/A:col10/1733656883337/Put/seqid=0 2024-12-08T11:21:24,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742205_1381 (size=30955) 2024-12-08T11:21:24,454 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/21fb16a060544f8d86ca817bdd824dbc 2024-12-08T11:21:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:24,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:24,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/16cc3244384d4854b0bbd7045be74d0d is 50, key is test_row_0/B:col10/1733656883337/Put/seqid=0 2024-12-08T11:21:24,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742206_1382 (size=12001) 2024-12-08T11:21:24,476 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/16cc3244384d4854b0bbd7045be74d0d 2024-12-08T11:21:24,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b8c2c7021edb40fca65e7c95815f5b54 is 50, key is test_row_0/C:col10/1733656883337/Put/seqid=0 2024-12-08T11:21:24,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656944494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656944499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656944500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742207_1383 (size=12001) 2024-12-08T11:21:24,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656944502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656944503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T11:21:24,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656944604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,610 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656944604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656944607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656944613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656944614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656944812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656944812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,820 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656944814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656944820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656944821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:24,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T11:21:24,907 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b8c2c7021edb40fca65e7c95815f5b54 2024-12-08T11:21:24,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/21fb16a060544f8d86ca817bdd824dbc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/21fb16a060544f8d86ca817bdd824dbc 2024-12-08T11:21:24,929 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/21fb16a060544f8d86ca817bdd824dbc, entries=150, sequenceid=51, filesize=30.2 K 2024-12-08T11:21:24,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/16cc3244384d4854b0bbd7045be74d0d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/16cc3244384d4854b0bbd7045be74d0d 2024-12-08T11:21:24,941 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/16cc3244384d4854b0bbd7045be74d0d, entries=150, sequenceid=51, filesize=11.7 K 2024-12-08T11:21:24,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b8c2c7021edb40fca65e7c95815f5b54 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b8c2c7021edb40fca65e7c95815f5b54 2024-12-08T11:21:24,946 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b8c2c7021edb40fca65e7c95815f5b54, entries=150, sequenceid=51, filesize=11.7 K 2024-12-08T11:21:24,947 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 548fe72b592216ea9ca6f0d238246b28 in 550ms, sequenceid=51, compaction requested=true 2024-12-08T11:21:24,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:24,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:24,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-08T11:21:24,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-08T11:21:24,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-08T11:21:24,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 704 msec 2024-12-08T11:21:24,952 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 708 msec 2024-12-08T11:21:25,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:25,118 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-08T11:21:25,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:25,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:25,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:25,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:25,119 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:25,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:25,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208287caa492f76436ba6d55f988af47926_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656885118/Put/seqid=0 2024-12-08T11:21:25,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656945128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656945129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656945131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656945131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742208_1384 (size=17034) 2024-12-08T11:21:25,136 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:25,142 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208287caa492f76436ba6d55f988af47926_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208287caa492f76436ba6d55f988af47926_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:25,143 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/04636050436a43f8bf1f324da8204c31, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:25,143 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656945137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/04636050436a43f8bf1f324da8204c31 is 175, key is test_row_0/A:col10/1733656885118/Put/seqid=0 2024-12-08T11:21:25,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742209_1385 (size=48139) 2024-12-08T11:21:25,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656945231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656945232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,236 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656945232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656945237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-08T11:21:25,348 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-08T11:21:25,349 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:25,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-08T11:21:25,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T11:21:25,350 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:25,351 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:25,351 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:25,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656945434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656945438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656945438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656945443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T11:21:25,502 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-08T11:21:25,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:25,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:25,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:25,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:25,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:25,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:25,549 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/04636050436a43f8bf1f324da8204c31 2024-12-08T11:21:25,556 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/15a306e65cc5433590fa8f5d7f1d7c32 is 50, key is test_row_0/B:col10/1733656885118/Put/seqid=0 2024-12-08T11:21:25,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742210_1386 (size=12001) 2024-12-08T11:21:25,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/15a306e65cc5433590fa8f5d7f1d7c32 2024-12-08T11:21:25,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/9ff7e107364c4f08b67fb6a1b6d27c34 is 50, key is test_row_0/C:col10/1733656885118/Put/seqid=0 2024-12-08T11:21:25,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742211_1387 (size=12001) 2024-12-08T11:21:25,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T11:21:25,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656945648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,655 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-08T11:21:25,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:25,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:25,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:25,656 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:25,656 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:25,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:25,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656945741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,747 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656945741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656945743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,756 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:25,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656945750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,808 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,808 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-08T11:21:25,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:25,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:25,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:25,808 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:25,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:25,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:25,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T11:21:25,960 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:25,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-08T11:21:25,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:25,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:25,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:25,961 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
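The warnings above come from HRegion.checkResources rejecting Mutate calls once the region's memstore is over its 512.0 K blocking limit, while the master's flush procedure (pid=109) keeps failing because the region reports it is already flushing. For reference, a minimal client-side sketch of backing off and retrying such rejected puts; the connection setup, retry count, and backoff values are illustrative assumptions rather than anything taken from this test, and depending on client retry settings the exception may arrive wrapped in a RetriesExhaustedException instead of directly.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the keys visible in the log (test_row_0, A:col10).
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                    // assumed starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);                      // may throw RegionTooBusyException
          break;                               // write accepted
        } catch (RegionTooBusyException busy) {
          // Region is over its memstore blocking limit; wait for the flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;                      // simple exponential backoff
        }
      }
    }
  }
}
```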
2024-12-08T11:21:25,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:25,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:25,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/9ff7e107364c4f08b67fb6a1b6d27c34 2024-12-08T11:21:25,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/04636050436a43f8bf1f324da8204c31 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/04636050436a43f8bf1f324da8204c31 2024-12-08T11:21:25,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/04636050436a43f8bf1f324da8204c31, entries=250, sequenceid=79, filesize=47.0 K 2024-12-08T11:21:25,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/15a306e65cc5433590fa8f5d7f1d7c32 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/15a306e65cc5433590fa8f5d7f1d7c32 2024-12-08T11:21:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:25,985 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/15a306e65cc5433590fa8f5d7f1d7c32, entries=150, sequenceid=79, filesize=11.7 K 2024-12-08T11:21:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:25,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/9ff7e107364c4f08b67fb6a1b6d27c34 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9ff7e107364c4f08b67fb6a1b6d27c34 2024-12-08T11:21:25,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:25,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9ff7e107364c4f08b67fb6a1b6d27c34, entries=150, sequenceid=79, filesize=11.7 K 2024-12-08T11:21:25,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 548fe72b592216ea9ca6f0d238246b28 in 873ms, sequenceid=79, compaction requested=true 2024-12-08T11:21:25,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:25,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:25,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:25,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:25,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:25,991 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:25,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:25,991 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T11:21:25,991 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:25,992 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:25,992 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:25,992 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/A is initiating minor compaction (all files) 2024-12-08T11:21:25,992 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/B is initiating minor compaction (all files) 2024-12-08T11:21:25,992 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/A in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
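At this point the flush has left four store files in the A and B stores (with a compaction also queued for C), and ExploringCompactionPolicy selects all four in each for a minor compaction; the "4 eligible, 16 blocking" figure refers to the blocking-store-files threshold. A hedged configuration sketch of the knobs behind that selection; the key names are the standard HBase 2.x ones, and the values shown are illustrative defaults rather than what this test configures.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningExample {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum / maximum number of store files considered for one minor compaction.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Ratio used by ExploringCompactionPolicy when scoring candidate file sets.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    // Writes to a store with this many files block until compaction catches up
    // (the "16 blocking" figure in the log corresponds to the default of 16).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}
```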
2024-12-08T11:21:25,993 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/079b88cabe5446f29b1e18640e550808, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3cb5aabe19f843a28602a437dac28a8c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/21fb16a060544f8d86ca817bdd824dbc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/04636050436a43f8bf1f324da8204c31] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=137.7 K 2024-12-08T11:21:25,993 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:25,993 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/079b88cabe5446f29b1e18640e550808, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3cb5aabe19f843a28602a437dac28a8c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/21fb16a060544f8d86ca817bdd824dbc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/04636050436a43f8bf1f324da8204c31] 2024-12-08T11:21:25,993 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/B in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
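These compactions were queued automatically by MemStoreFlusher ("compaction requested=true" above); similar work can also be requested explicitly through the Admin API. A sketch under the assumption that a client wants to ask for the flush and the A-family compaction by hand; nothing here is taken from the test itself, and the actual rewrite still happens asynchronously on the region server.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class ManualFlushCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Request a flush of every region of the table.
      admin.flush(table);
      // Request a compaction of just the 'A' column family; the region server's
      // CompactSplit thread performs the file rewrite in the background.
      admin.compact(table, Bytes.toBytes("A"));
    }
  }
}
```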
2024-12-08T11:21:25,993 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/2237d0c3503249969f14aa8bcd783471, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/039670f712d043f4a090d189c801c263, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/16cc3244384d4854b0bbd7045be74d0d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/15a306e65cc5433590fa8f5d7f1d7c32] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=46.9 K 2024-12-08T11:21:25,993 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 079b88cabe5446f29b1e18640e550808, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733656882135 2024-12-08T11:21:25,993 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2237d0c3503249969f14aa8bcd783471, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733656882135 2024-12-08T11:21:25,994 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cb5aabe19f843a28602a437dac28a8c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733656882206 2024-12-08T11:21:25,994 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 039670f712d043f4a090d189c801c263, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733656882206 2024-12-08T11:21:25,994 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21fb16a060544f8d86ca817bdd824dbc, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733656883336 2024-12-08T11:21:25,994 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 16cc3244384d4854b0bbd7045be74d0d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733656883336 2024-12-08T11:21:25,994 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04636050436a43f8bf1f324da8204c31, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733656884487 2024-12-08T11:21:25,994 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 15a306e65cc5433590fa8f5d7f1d7c32, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733656884496 2024-12-08T11:21:25,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:25,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:25,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:25,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:25,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,009 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,031 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#B#compaction#334 average throughput is 0.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:26,032 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/78aa135ceba24bc5b1b33ad198cb9251 is 50, key is test_row_0/B:col10/1733656885118/Put/seqid=0 2024-12-08T11:21:26,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,033 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412086f52de15d25f4927a32134f2fbc9080c_548fe72b592216ea9ca6f0d238246b28 store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:26,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,046 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412086f52de15d25f4927a32134f2fbc9080c_548fe72b592216ea9ca6f0d238246b28, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:26,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,046 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086f52de15d25f4927a32134f2fbc9080c_548fe72b592216ea9ca6f0d238246b28 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:26,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,048 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,055 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742213_1389 (size=4469) 2024-12-08T11:21:26,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742212_1388 (size=12139) 2024-12-08T11:21:26,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,062 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#A#compaction#333 average throughput is 0.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:26,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,063 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/d0fb6fc9f39b440eb8246ce54d35e755 is 175, key is test_row_0/A:col10/1733656885118/Put/seqid=0 2024-12-08T11:21:26,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,069 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/78aa135ceba24bc5b1b33ad198cb9251 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/78aa135ceba24bc5b1b33ad198cb9251 2024-12-08T11:21:26,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,073 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/B of 548fe72b592216ea9ca6f0d238246b28 into 78aa135ceba24bc5b1b33ad198cb9251(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:26,073 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:26,073 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/B, priority=12, startTime=1733656885991; duration=0sec 2024-12-08T11:21:26,073 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:26,073 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:B 2024-12-08T11:21:26,074 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:26,075 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:26,075 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/C is initiating minor compaction (all files) 2024-12-08T11:21:26,075 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/C in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:26,075 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/d527115f083e4946b75eddb28594b022, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b4a385f573e2400bae7342dbf3168b05, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b8c2c7021edb40fca65e7c95815f5b54, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9ff7e107364c4f08b67fb6a1b6d27c34] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=46.9 K 2024-12-08T11:21:26,075 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting d527115f083e4946b75eddb28594b022, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1733656882135 2024-12-08T11:21:26,075 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b4a385f573e2400bae7342dbf3168b05, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733656882206 2024-12-08T11:21:26,076 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b8c2c7021edb40fca65e7c95815f5b54, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1733656883336 2024-12-08T11:21:26,076 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 9ff7e107364c4f08b67fb6a1b6d27c34, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733656884496 2024-12-08T11:21:26,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742214_1390 (size=31093) 2024-12-08T11:21:26,102 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#C#compaction#335 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:26,104 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/90170f69731f487d9479d599035b660b is 50, key is test_row_0/C:col10/1733656885118/Put/seqid=0 2024-12-08T11:21:26,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742215_1391 (size=12139) 2024-12-08T11:21:26,113 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-08T11:21:26,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:26,114 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T11:21:26,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:26,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:26,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:26,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:26,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:26,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:26,120 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/90170f69731f487d9479d599035b660b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/90170f69731f487d9479d599035b660b 2024-12-08T11:21:26,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T11:21:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,125 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/C of 548fe72b592216ea9ca6f0d238246b28 into 90170f69731f487d9479d599035b660b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:26,125 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:26,125 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/C, priority=12, startTime=1733656885991; duration=0sec 2024-12-08T11:21:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,125 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:26,125 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:C 2024-12-08T11:21:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T11:21:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f9592043d91e4eeb8457b8376384ece4_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656885128/Put/seqid=0 2024-12-08T11:21:26,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,134 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,139 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,145 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,149 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,154 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742216_1392 (size=9714) 2024-12-08T11:21:26,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28
2024-12-08T11:21:26,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing
2024-12-08T11:21:26,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T11:21:26,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T11:21:26,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-08T11:21:26,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T11:21:26,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656946356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
2024-12-08T11:21:26,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T11:21:26,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656946359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
2024-12-08T11:21:26,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T11:21:26,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656946360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
2024-12-08T11:21:26,365 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T11:21:26,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656946361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
2024-12-08T11:21:26,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-12-08T11:21:26,469 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T11:21:26,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656946465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
2024-12-08T11:21:26,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T11:21:26,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656946466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
2024-12-08T11:21:26,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T11:21:26,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656946466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
2024-12-08T11:21:26,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-12-08T11:21:26,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656946466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491
2024-12-08T11:21:26,498 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/d0fb6fc9f39b440eb8246ce54d35e755 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d0fb6fc9f39b440eb8246ce54d35e755
2024-12-08T11:21:26,504 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/A of 548fe72b592216ea9ca6f0d238246b28 into d0fb6fc9f39b440eb8246ce54d35e755(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-08T11:21:26,504 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28:
2024-12-08T11:21:26,504 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/A, priority=12, startTime=1733656885990; duration=0sec
2024-12-08T11:21:26,504 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-08T11:21:26,504 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:A
2024-12-08T11:21:26,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:26,563 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f9592043d91e4eeb8457b8376384ece4_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f9592043d91e4eeb8457b8376384ece4_548fe72b592216ea9ca6f0d238246b28
2024-12-08T11:21:26,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/c1ea9d858d0141ed80b52d779b6ecab0, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28]
2024-12-08T11:21:26,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/c1ea9d858d0141ed80b52d779b6ecab0 is 175, key is test_row_0/A:col10/1733656885128/Put/seqid=0
2024-12-08T11:21:26,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742217_1393 (size=22361)
2024-12-08T11:21:26,578 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=88, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/c1ea9d858d0141ed80b52d779b6ecab0
2024-12-08T11:21:26,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/9befc3c0d73e49ff9c84eee4c8fe9f75 is 50, key is test_row_0/B:col10/1733656885128/Put/seqid=0
2024-12-08T11:21:26,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742218_1394 (size=9657)
2024-12-08T11:21:26,600 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/9befc3c0d73e49ff9c84eee4c8fe9f75
2024-12-08T11:21:26,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b0739feaf9f44b52baa29920eaec7891 is 50, key is test_row_0/C:col10/1733656885128/Put/seqid=0
2024-12-08T11:21:26,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742219_1395 (size=9657)
2024-12-08T11:21:26,622 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b0739feaf9f44b52baa29920eaec7891
2024-12-08T11:21:26,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/c1ea9d858d0141ed80b52d779b6ecab0 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c1ea9d858d0141ed80b52d779b6ecab0
2024-12-08T11:21:26,630 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c1ea9d858d0141ed80b52d779b6ecab0, entries=100, sequenceid=88, filesize=21.8 K
2024-12-08T11:21:26,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/9befc3c0d73e49ff9c84eee4c8fe9f75 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/9befc3c0d73e49ff9c84eee4c8fe9f75
2024-12-08T11:21:26,634 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/9befc3c0d73e49ff9c84eee4c8fe9f75, entries=100, sequenceid=88, filesize=9.4 K 2024-12-08T11:21:26,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b0739feaf9f44b52baa29920eaec7891 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b0739feaf9f44b52baa29920eaec7891 2024-12-08T11:21:26,639 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b0739feaf9f44b52baa29920eaec7891, entries=100, sequenceid=88, filesize=9.4 K 2024-12-08T11:21:26,639 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 548fe72b592216ea9ca6f0d238246b28 in 525ms, sequenceid=88, compaction requested=false 2024-12-08T11:21:26,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:26,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:26,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-08T11:21:26,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-08T11:21:26,642 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-08T11:21:26,642 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2900 sec 2024-12-08T11:21:26,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.2930 sec 2024-12-08T11:21:26,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:26,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-12-08T11:21:26,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:26,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:26,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:26,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:26,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:26,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:26,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208bcd20de5430e4facbbe0d08d60305961_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656886662/Put/seqid=0 2024-12-08T11:21:26,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656946672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656946673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656946675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656946675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656946676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742220_1396 (size=14594) 2024-12-08T11:21:26,687 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:26,693 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208bcd20de5430e4facbbe0d08d60305961_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208bcd20de5430e4facbbe0d08d60305961_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:26,701 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2539acadc08744489f1c5b11cf9d083b, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:26,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2539acadc08744489f1c5b11cf9d083b is 175, key is test_row_0/A:col10/1733656886662/Put/seqid=0 2024-12-08T11:21:26,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is 
added to blk_1073742221_1397 (size=39549) 2024-12-08T11:21:26,709 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=58.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2539acadc08744489f1c5b11cf9d083b 2024-12-08T11:21:26,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/31061eb786ed4af197fde66f589179a8 is 50, key is test_row_0/B:col10/1733656886662/Put/seqid=0 2024-12-08T11:21:26,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742222_1398 (size=12001) 2024-12-08T11:21:26,739 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/31061eb786ed4af197fde66f589179a8 2024-12-08T11:21:26,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/9bb9f86711424b9c901e1be77e05a3d8 is 50, key is test_row_0/C:col10/1733656886662/Put/seqid=0 2024-12-08T11:21:26,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742223_1399 (size=12001) 2024-12-08T11:21:26,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656946778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656946781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656946781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656946978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656946979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656946982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656946985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:26,989 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:26,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656946985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,151 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/9bb9f86711424b9c901e1be77e05a3d8 2024-12-08T11:21:27,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2539acadc08744489f1c5b11cf9d083b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2539acadc08744489f1c5b11cf9d083b 2024-12-08T11:21:27,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2539acadc08744489f1c5b11cf9d083b, entries=200, sequenceid=119, filesize=38.6 K 2024-12-08T11:21:27,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/31061eb786ed4af197fde66f589179a8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/31061eb786ed4af197fde66f589179a8 2024-12-08T11:21:27,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/31061eb786ed4af197fde66f589179a8, entries=150, sequenceid=119, filesize=11.7 K 2024-12-08T11:21:27,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/9bb9f86711424b9c901e1be77e05a3d8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9bb9f86711424b9c901e1be77e05a3d8 2024-12-08T11:21:27,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9bb9f86711424b9c901e1be77e05a3d8, entries=150, sequenceid=119, filesize=11.7 K 2024-12-08T11:21:27,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 548fe72b592216ea9ca6f0d238246b28 in 508ms, sequenceid=119, compaction requested=true 2024-12-08T11:21:27,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:27,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:27,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:27,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:27,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:27,171 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:27,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:27,171 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:27,171 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:27,172 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:27,172 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:27,172 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/A is initiating minor compaction (all files) 2024-12-08T11:21:27,172 DEBUG 
[RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/B is initiating minor compaction (all files) 2024-12-08T11:21:27,172 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/A in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,172 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/B in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,172 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/78aa135ceba24bc5b1b33ad198cb9251, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/9befc3c0d73e49ff9c84eee4c8fe9f75, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/31061eb786ed4af197fde66f589179a8] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=33.0 K 2024-12-08T11:21:27,172 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d0fb6fc9f39b440eb8246ce54d35e755, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c1ea9d858d0141ed80b52d779b6ecab0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2539acadc08744489f1c5b11cf9d083b] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=90.8 K 2024-12-08T11:21:27,172 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,173 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d0fb6fc9f39b440eb8246ce54d35e755, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c1ea9d858d0141ed80b52d779b6ecab0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2539acadc08744489f1c5b11cf9d083b] 2024-12-08T11:21:27,173 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0fb6fc9f39b440eb8246ce54d35e755, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733656884496 2024-12-08T11:21:27,173 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 78aa135ceba24bc5b1b33ad198cb9251, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733656884496 2024-12-08T11:21:27,173 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 9befc3c0d73e49ff9c84eee4c8fe9f75, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733656885125 2024-12-08T11:21:27,173 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1ea9d858d0141ed80b52d779b6ecab0, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733656885125 2024-12-08T11:21:27,174 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 31061eb786ed4af197fde66f589179a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733656886352 2024-12-08T11:21:27,174 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2539acadc08744489f1c5b11cf9d083b, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733656886352 2024-12-08T11:21:27,182 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:27,184 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#B#compaction#343 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:27,184 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/a2a68e3d2f6e4aa5b59408743946047b is 50, key is test_row_0/B:col10/1733656886662/Put/seqid=0 2024-12-08T11:21:27,189 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412085b9062cc41b342af9c70f96b7ff5b61f_548fe72b592216ea9ca6f0d238246b28 store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:27,190 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412085b9062cc41b342af9c70f96b7ff5b61f_548fe72b592216ea9ca6f0d238246b28, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:27,190 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085b9062cc41b342af9c70f96b7ff5b61f_548fe72b592216ea9ca6f0d238246b28 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:27,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742224_1400 (size=12241) 2024-12-08T11:21:27,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742225_1401 (size=4469) 2024-12-08T11:21:27,199 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#A#compaction#342 average throughput is 1.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:27,199 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/641d6976d00c46b8b70e2e510ab02e9f is 175, key is test_row_0/A:col10/1733656886662/Put/seqid=0 2024-12-08T11:21:27,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742226_1402 (size=31195) 2024-12-08T11:21:27,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:27,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:21:27,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:27,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:27,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:27,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:27,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:27,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:27,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085c264f52ef814b82bc1dfb86b7173c70_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656887295/Put/seqid=0 2024-12-08T11:21:27,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742227_1403 (size=17134) 2024-12-08T11:21:27,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656947348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656947349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656947350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-08T11:21:27,454 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-08T11:21:27,455 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:27,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-08T11:21:27,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656947453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656947455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,457 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:27,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T11:21:27,457 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:27,458 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:27,466 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656947460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656947483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656947484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T11:21:27,597 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/a2a68e3d2f6e4aa5b59408743946047b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/a2a68e3d2f6e4aa5b59408743946047b 2024-12-08T11:21:27,602 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/B of 548fe72b592216ea9ca6f0d238246b28 into a2a68e3d2f6e4aa5b59408743946047b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:27,602 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:27,602 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/B, priority=13, startTime=1733656887171; duration=0sec 2024-12-08T11:21:27,602 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:27,602 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:B 2024-12-08T11:21:27,602 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:27,603 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:27,603 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/C is initiating minor compaction (all files) 2024-12-08T11:21:27,603 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/C in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,604 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/90170f69731f487d9479d599035b660b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b0739feaf9f44b52baa29920eaec7891, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9bb9f86711424b9c901e1be77e05a3d8] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=33.0 K 2024-12-08T11:21:27,604 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 90170f69731f487d9479d599035b660b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733656884496 2024-12-08T11:21:27,604 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b0739feaf9f44b52baa29920eaec7891, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733656885125 2024-12-08T11:21:27,605 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bb9f86711424b9c901e1be77e05a3d8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733656886352 2024-12-08T11:21:27,609 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-08T11:21:27,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:27,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,610 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:27,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:27,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:27,611 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#C#compaction#345 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:27,611 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/38933341932c44a699c09f2c7efe64d9 is 50, key is test_row_0/C:col10/1733656886662/Put/seqid=0 2024-12-08T11:21:27,617 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/641d6976d00c46b8b70e2e510ab02e9f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/641d6976d00c46b8b70e2e510ab02e9f 2024-12-08T11:21:27,623 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/A of 548fe72b592216ea9ca6f0d238246b28 into 641d6976d00c46b8b70e2e510ab02e9f(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:27,623 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:27,623 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/A, priority=13, startTime=1733656887171; duration=0sec 2024-12-08T11:21:27,623 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:27,623 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:A 2024-12-08T11:21:27,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742228_1404 (size=12241) 2024-12-08T11:21:27,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656947658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,662 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656947658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,668 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656947667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,718 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:27,722 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085c264f52ef814b82bc1dfb86b7173c70_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085c264f52ef814b82bc1dfb86b7173c70_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:27,722 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/bc25eb08fac5456ba8663f746dcb1b17, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:27,723 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/bc25eb08fac5456ba8663f746dcb1b17 is 175, key is test_row_0/A:col10/1733656887295/Put/seqid=0 2024-12-08T11:21:27,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742229_1405 (size=48239) 2024-12-08T11:21:27,728 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/bc25eb08fac5456ba8663f746dcb1b17 2024-12-08T11:21:27,736 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/828bfb8fec2a445c8e73b88852c62deb is 50, key is test_row_0/B:col10/1733656887295/Put/seqid=0 2024-12-08T11:21:27,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742230_1406 (size=12101) 2024-12-08T11:21:27,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/828bfb8fec2a445c8e73b88852c62deb 2024-12-08T11:21:27,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/68986711a7c646888264ae7824690d75 is 50, key is test_row_0/C:col10/1733656887295/Put/seqid=0 2024-12-08T11:21:27,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T11:21:27,761 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-08T11:21:27,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:27,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,762 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:27,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:27,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:27,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742231_1407 (size=12101) 2024-12-08T11:21:27,914 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-08T11:21:27,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:27,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:27,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:27,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:27,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:27,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656947963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656947965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:27,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656947971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,035 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/38933341932c44a699c09f2c7efe64d9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/38933341932c44a699c09f2c7efe64d9 2024-12-08T11:21:28,039 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/C of 548fe72b592216ea9ca6f0d238246b28 into 38933341932c44a699c09f2c7efe64d9(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:28,039 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:28,039 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/C, priority=13, startTime=1733656887171; duration=0sec 2024-12-08T11:21:28,040 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:28,040 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:C 2024-12-08T11:21:28,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T11:21:28,067 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-08T11:21:28,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:28,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:28,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:28,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:28,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:28,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:28,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/68986711a7c646888264ae7824690d75 2024-12-08T11:21:28,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/bc25eb08fac5456ba8663f746dcb1b17 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bc25eb08fac5456ba8663f746dcb1b17 2024-12-08T11:21:28,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bc25eb08fac5456ba8663f746dcb1b17, entries=250, sequenceid=130, filesize=47.1 K 2024-12-08T11:21:28,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/828bfb8fec2a445c8e73b88852c62deb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/828bfb8fec2a445c8e73b88852c62deb 2024-12-08T11:21:28,183 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/828bfb8fec2a445c8e73b88852c62deb, entries=150, sequenceid=130, filesize=11.8 K 2024-12-08T11:21:28,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/68986711a7c646888264ae7824690d75 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/68986711a7c646888264ae7824690d75 2024-12-08T11:21:28,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/68986711a7c646888264ae7824690d75, entries=150, sequenceid=130, filesize=11.8 K 2024-12-08T11:21:28,189 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 548fe72b592216ea9ca6f0d238246b28 in 892ms, sequenceid=130, compaction requested=false 2024-12-08T11:21:28,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:28,220 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=111 2024-12-08T11:21:28,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:28,221 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:21:28,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:28,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:28,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:28,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:28,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:28,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:28,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081567626b1a7346008683f16f1e4f9f40_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656887348/Put/seqid=0 2024-12-08T11:21:28,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742232_1408 (size=12304) 2024-12-08T11:21:28,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:28,239 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081567626b1a7346008683f16f1e4f9f40_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081567626b1a7346008683f16f1e4f9f40_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:28,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/e21b8513f05f45b584efbc17a1a46fdf, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:28,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/e21b8513f05f45b584efbc17a1a46fdf is 175, key is test_row_0/A:col10/1733656887348/Put/seqid=0 2024-12-08T11:21:28,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742233_1409 (size=31105) 2024-12-08T11:21:28,254 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/e21b8513f05f45b584efbc17a1a46fdf 2024-12-08T11:21:28,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/00e3d89f519f49bd991f29459f098dcb is 50, key is test_row_0/B:col10/1733656887348/Put/seqid=0 2024-12-08T11:21:28,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742234_1410 (size=12151) 2024-12-08T11:21:28,281 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/00e3d89f519f49bd991f29459f098dcb 2024-12-08T11:21:28,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/2306a92d11b048f3a811a5fb75f8b724 is 50, key is test_row_0/C:col10/1733656887348/Put/seqid=0 2024-12-08T11:21:28,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742235_1411 (size=12151) 2024-12-08T11:21:28,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:28,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:28,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656948488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656948488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656948491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656948495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656948496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T11:21:28,598 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656948597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656948597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656948597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,703 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/2306a92d11b048f3a811a5fb75f8b724 2024-12-08T11:21:28,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/e21b8513f05f45b584efbc17a1a46fdf as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/e21b8513f05f45b584efbc17a1a46fdf 2024-12-08T11:21:28,711 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/e21b8513f05f45b584efbc17a1a46fdf, entries=150, sequenceid=158, filesize=30.4 K 2024-12-08T11:21:28,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/00e3d89f519f49bd991f29459f098dcb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/00e3d89f519f49bd991f29459f098dcb 2024-12-08T11:21:28,719 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/00e3d89f519f49bd991f29459f098dcb, entries=150, sequenceid=158, filesize=11.9 K 2024-12-08T11:21:28,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/2306a92d11b048f3a811a5fb75f8b724 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2306a92d11b048f3a811a5fb75f8b724 2024-12-08T11:21:28,724 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2306a92d11b048f3a811a5fb75f8b724, entries=150, sequenceid=158, filesize=11.9 K 2024-12-08T11:21:28,724 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 548fe72b592216ea9ca6f0d238246b28 in 503ms, sequenceid=158, compaction requested=true 2024-12-08T11:21:28,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:28,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:28,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-08T11:21:28,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-08T11:21:28,727 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-08T11:21:28,727 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2680 sec 2024-12-08T11:21:28,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.2730 sec 2024-12-08T11:21:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:28,802 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:21:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:28,827 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c9579b892b3041d5b44bb3932b018656_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656888490/Put/seqid=0 2024-12-08T11:21:28,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742236_1412 (size=14794) 2024-12-08T11:21:28,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656948904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656948908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:28,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:28,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656948912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:29,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656949014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:29,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656949015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:29,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656949015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:29,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656949221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:29,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656949223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:29,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656949224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,244 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:29,248 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c9579b892b3041d5b44bb3932b018656_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c9579b892b3041d5b44bb3932b018656_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:29,248 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/34d8cc2acd9b406a851a61c59e0423d6, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:29,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/34d8cc2acd9b406a851a61c59e0423d6 is 175, key is test_row_0/A:col10/1733656888490/Put/seqid=0 2024-12-08T11:21:29,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742237_1413 (size=39749) 2024-12-08T11:21:29,254 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/34d8cc2acd9b406a851a61c59e0423d6 2024-12-08T11:21:29,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/1dbbc8e1c46249059bd69d55dd4ffae6 is 50, key is test_row_0/B:col10/1733656888490/Put/seqid=0 2024-12-08T11:21:29,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742238_1414 (size=12151) 2024-12-08T11:21:29,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/1dbbc8e1c46249059bd69d55dd4ffae6 2024-12-08T11:21:29,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/948ee3dd8eb54c3eb5dd84fe377bf03f is 50, key is test_row_0/C:col10/1733656888490/Put/seqid=0 2024-12-08T11:21:29,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742239_1415 (size=12151) 2024-12-08T11:21:29,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:29,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656949527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:29,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656949528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:29,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656949530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-08T11:21:29,561 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-08T11:21:29,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:29,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-08T11:21:29,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T11:21:29,563 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:29,564 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:29,564 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:29,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T11:21:29,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/948ee3dd8eb54c3eb5dd84fe377bf03f 2024-12-08T11:21:29,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/34d8cc2acd9b406a851a61c59e0423d6 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/34d8cc2acd9b406a851a61c59e0423d6 2024-12-08T11:21:29,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/34d8cc2acd9b406a851a61c59e0423d6, entries=200, sequenceid=170, filesize=38.8 K 2024-12-08T11:21:29,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/1dbbc8e1c46249059bd69d55dd4ffae6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/1dbbc8e1c46249059bd69d55dd4ffae6 2024-12-08T11:21:29,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/1dbbc8e1c46249059bd69d55dd4ffae6, entries=150, sequenceid=170, filesize=11.9 K 2024-12-08T11:21:29,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/948ee3dd8eb54c3eb5dd84fe377bf03f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/948ee3dd8eb54c3eb5dd84fe377bf03f 2024-12-08T11:21:29,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/948ee3dd8eb54c3eb5dd84fe377bf03f, entries=150, sequenceid=170, filesize=11.9 K 2024-12-08T11:21:29,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 548fe72b592216ea9ca6f0d238246b28 in 894ms, sequenceid=170, compaction requested=true 2024-12-08T11:21:29,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:29,696 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:29,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:29,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:29,696 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:29,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:B, priority=-2147483648, current under compaction 
store size is 2 2024-12-08T11:21:29,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:29,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:29,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:29,697 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 150288 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:29,697 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/A is initiating minor compaction (all files) 2024-12-08T11:21:29,697 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/A in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:29,697 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/641d6976d00c46b8b70e2e510ab02e9f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bc25eb08fac5456ba8663f746dcb1b17, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/e21b8513f05f45b584efbc17a1a46fdf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/34d8cc2acd9b406a851a61c59e0423d6] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=146.8 K 2024-12-08T11:21:29,697 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:29,697 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:29,697 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/641d6976d00c46b8b70e2e510ab02e9f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bc25eb08fac5456ba8663f746dcb1b17, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/e21b8513f05f45b584efbc17a1a46fdf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/34d8cc2acd9b406a851a61c59e0423d6] 2024-12-08T11:21:29,697 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/B is initiating minor compaction (all files) 2024-12-08T11:21:29,697 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/B in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:29,697 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/a2a68e3d2f6e4aa5b59408743946047b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/828bfb8fec2a445c8e73b88852c62deb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/00e3d89f519f49bd991f29459f098dcb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/1dbbc8e1c46249059bd69d55dd4ffae6] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=47.5 K 2024-12-08T11:21:29,698 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 641d6976d00c46b8b70e2e510ab02e9f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733656886352 2024-12-08T11:21:29,698 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a2a68e3d2f6e4aa5b59408743946047b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733656886352 2024-12-08T11:21:29,698 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc25eb08fac5456ba8663f746dcb1b17, keycount=250, bloomtype=ROW, size=47.1 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733656886674 2024-12-08T11:21:29,698 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 828bfb8fec2a445c8e73b88852c62deb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733656887291 2024-12-08T11:21:29,698 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting e21b8513f05f45b584efbc17a1a46fdf, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733656887338 
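The flush activity above (HBaseAdmin$TableFuture reporting procId 110 completed, the master storing FlushTableProcedure pid=112, and the repeated "Checking to see if procedure is done" polling) is driven from the client side through the Admin API. Below is a minimal, illustrative sketch of how such a table flush can be requested; the table name is taken from the log, while the connection setup and class name are assumptions added for demonstration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Requests a flush of every region of the table. On the master this
      // surfaces as a flush procedure (the pid=110 / pid=112 entries above),
      // and the call returns once that procedure reports completion, which
      // matches the "Checking to see if procedure is done" polling in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}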
2024-12-08T11:21:29,698 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 00e3d89f519f49bd991f29459f098dcb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733656887338 2024-12-08T11:21:29,700 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34d8cc2acd9b406a851a61c59e0423d6, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733656888482 2024-12-08T11:21:29,700 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dbbc8e1c46249059bd69d55dd4ffae6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733656888482 2024-12-08T11:21:29,711 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:29,715 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:29,715 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-08T11:21:29,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:29,716 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T11:21:29,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:29,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:29,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:29,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:29,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:29,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:29,722 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#B#compaction#355 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:29,723 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/30487f3a570040d9a4d932d0dd54467d is 50, key is test_row_0/B:col10/1733656888490/Put/seqid=0 2024-12-08T11:21:29,724 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120819a101284982442e95678d1b114ea754_548fe72b592216ea9ca6f0d238246b28 store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:29,727 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120819a101284982442e95678d1b114ea754_548fe72b592216ea9ca6f0d238246b28, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:29,727 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120819a101284982442e95678d1b114ea754_548fe72b592216ea9ca6f0d238246b28 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:29,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208728c97baf872479691b69d3d3b6fe9aa_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656888911/Put/seqid=0 2024-12-08T11:21:29,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742240_1416 (size=12527) 2024-12-08T11:21:29,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742241_1417 (size=4469) 2024-12-08T11:21:29,786 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#A#compaction#354 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:29,786 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/d4820afed988453ba11f9cfb9f0f703b is 175, key is test_row_0/A:col10/1733656888490/Put/seqid=0 2024-12-08T11:21:29,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742242_1418 (size=12304) 2024-12-08T11:21:29,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742243_1419 (size=31481) 2024-12-08T11:21:29,812 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/d4820afed988453ba11f9cfb9f0f703b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4820afed988453ba11f9cfb9f0f703b 2024-12-08T11:21:29,817 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/A of 548fe72b592216ea9ca6f0d238246b28 into d4820afed988453ba11f9cfb9f0f703b(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:29,817 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:29,818 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/A, priority=12, startTime=1733656889695; duration=0sec 2024-12-08T11:21:29,818 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:29,818 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:A 2024-12-08T11:21:29,818 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:29,819 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48644 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:29,819 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/C is initiating minor compaction (all files) 2024-12-08T11:21:29,819 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/C in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
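The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" lines describe the store-file selection step that precedes each of these minor compactions. As a rough illustration only (not the HBase implementation), a candidate set is commonly described as "in ratio" when no single file is larger than the configured ratio times the combined size of the other files in the set; the sketch below assumes that semantics, uses the approximate family-B file sizes reported above, and uses 1.2 as the commonly cited default for hbase.hstore.compaction.ratio.

public final class RatioCheck {
  // Simplified sketch of a size-ratio test over candidate store files.
  static boolean filesInRatio(double[] sizes, double ratio) {
    double total = 0;
    for (double s : sizes) {
      total += s;
    }
    for (double s : sizes) {
      // A file that is too large relative to the rest of the selection
      // pushes the whole candidate set out of ratio.
      if (s > (total - s) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes (KB) of the four family-B files selected above:
    // 12.0, 11.8, 11.9, 11.9 -- well within a 1.2 ratio, so all four
    // can go into the single minor compaction the log reports.
    double[] bFilesKb = { 12.0, 11.8, 11.9, 11.9 };
    System.out.println(filesInRatio(bFilesKb, 1.2)); // prints: true
  }
}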
2024-12-08T11:21:29,819 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/38933341932c44a699c09f2c7efe64d9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/68986711a7c646888264ae7824690d75, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2306a92d11b048f3a811a5fb75f8b724, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/948ee3dd8eb54c3eb5dd84fe377bf03f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=47.5 K 2024-12-08T11:21:29,819 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38933341932c44a699c09f2c7efe64d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733656886352 2024-12-08T11:21:29,820 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68986711a7c646888264ae7824690d75, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1733656887291 2024-12-08T11:21:29,820 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2306a92d11b048f3a811a5fb75f8b724, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1733656887338 2024-12-08T11:21:29,820 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 948ee3dd8eb54c3eb5dd84fe377bf03f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733656888482 2024-12-08T11:21:29,829 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#C#compaction#357 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:29,829 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/e70b5ccbb4c24a1b99bac558917a7b58 is 50, key is test_row_0/C:col10/1733656888490/Put/seqid=0 2024-12-08T11:21:29,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742244_1420 (size=12527) 2024-12-08T11:21:29,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T11:21:30,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
as already flushing 2024-12-08T11:21:30,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:30,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656950056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656950057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,066 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656950059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656950161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T11:21:30,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656950167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656950168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,182 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/30487f3a570040d9a4d932d0dd54467d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/30487f3a570040d9a4d932d0dd54467d 2024-12-08T11:21:30,186 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/B of 548fe72b592216ea9ca6f0d238246b28 into 30487f3a570040d9a4d932d0dd54467d(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
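The repeated RegionTooBusyException entries above come from the server rejecting Mutate calls while the region's memstore is over its blocking limit (512.0 K here). The test client does not see these failures immediately because the blocking HTable.put path retries internally, which is what the RpcRetryingCallerImpl entries further down ("tries=6, retries=16, started=... ms ago") show. A minimal client-side sketch follows; the table, row, family, and qualifier names are taken from the log, while the retry settings and value are assumptions added for demonstration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithRetries {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs (example values, not taken from the test config);
    // the RpcRetryingCallerImpl lines in the log come from this built-in retry loop.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100L); // base pause in ms, backed off between tries

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // put() blocks and retries internally; a RegionTooBusyException is only
      // surfaced to the caller once all retries are exhausted.
      table.put(put);
    }
  }
}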
2024-12-08T11:21:30,186 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:30,187 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/B, priority=12, startTime=1733656889696; duration=0sec 2024-12-08T11:21:30,187 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:30,187 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:B 2024-12-08T11:21:30,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:30,208 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208728c97baf872479691b69d3d3b6fe9aa_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208728c97baf872479691b69d3d3b6fe9aa_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:30,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/c0706121c6ec4895b8b6c0af9900dedb, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:30,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/c0706121c6ec4895b8b6c0af9900dedb is 175, key is test_row_0/A:col10/1733656888911/Put/seqid=0 2024-12-08T11:21:30,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742245_1421 (size=31105) 2024-12-08T11:21:30,254 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/e70b5ccbb4c24a1b99bac558917a7b58 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e70b5ccbb4c24a1b99bac558917a7b58 2024-12-08T11:21:30,258 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/C of 
548fe72b592216ea9ca6f0d238246b28 into e70b5ccbb4c24a1b99bac558917a7b58(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:30,258 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:30,258 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/C, priority=12, startTime=1733656889696; duration=0sec 2024-12-08T11:21:30,259 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:30,259 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:C 2024-12-08T11:21:30,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656950365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656950374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656950374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656950503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,509 DEBUG [Thread-1660 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:30,523 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656950518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,523 DEBUG [Thread-1662 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4165 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:30,614 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=194, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/c0706121c6ec4895b8b6c0af9900dedb 2024-12-08T11:21:30,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/ae1cff321a5048abab4e868871066409 is 50, key is test_row_0/B:col10/1733656888911/Put/seqid=0 2024-12-08T11:21:30,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742246_1422 (size=12151) 2024-12-08T11:21:30,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T11:21:30,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656950673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656950680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:30,685 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:30,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656950680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:31,027 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/ae1cff321a5048abab4e868871066409 2024-12-08T11:21:31,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/5a1a30aafbeb425fb182ab568c82f97e is 50, key is test_row_0/C:col10/1733656888911/Put/seqid=0 2024-12-08T11:21:31,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742247_1423 (size=12151) 2024-12-08T11:21:31,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:31,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656951181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:31,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:31,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656951187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:31,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:31,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656951189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:31,443 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/5a1a30aafbeb425fb182ab568c82f97e 2024-12-08T11:21:31,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/c0706121c6ec4895b8b6c0af9900dedb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c0706121c6ec4895b8b6c0af9900dedb 2024-12-08T11:21:31,456 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c0706121c6ec4895b8b6c0af9900dedb, entries=150, sequenceid=194, filesize=30.4 K 2024-12-08T11:21:31,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/ae1cff321a5048abab4e868871066409 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/ae1cff321a5048abab4e868871066409 2024-12-08T11:21:31,462 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/ae1cff321a5048abab4e868871066409, entries=150, sequenceid=194, filesize=11.9 K 2024-12-08T11:21:31,463 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/5a1a30aafbeb425fb182ab568c82f97e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/5a1a30aafbeb425fb182ab568c82f97e 2024-12-08T11:21:31,468 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/5a1a30aafbeb425fb182ab568c82f97e, entries=150, sequenceid=194, filesize=11.9 K 2024-12-08T11:21:31,469 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 548fe72b592216ea9ca6f0d238246b28 in 1754ms, sequenceid=194, compaction requested=false 2024-12-08T11:21:31,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:31,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
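The flood of RegionTooBusyException warnings above is HRegion.checkResources refusing new mutations while the region's memstore sits over its blocking threshold (a deliberately tiny 512.0 K here) until the in-flight flush completes; the client's RpcRetryingCallerImpl keeps retrying in the meantime. Below is a minimal Java sketch of how such a small blocking limit could be configured and how a writer experiences it. The configuration keys and client API are standard HBase; the concrete property values, the class name, and the standalone connection setup are assumptions for illustration only, since the test harness wires up its own mini-cluster.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstorePressureSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The blocking size is flush.size * block.multiplier; values this small are an
    // assumption chosen only to mirror the "Over memstore limit=512.0 K" messages.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // The caller in the log retried with tries=6, retries=16; the client-side knob:
    conf.setInt("hbase.client.retries.number", 16);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_2"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // Table.put retries internally; a blocked region shows up to the caller only
        // after the retry budget is spent, either as a RegionTooBusyException or
        // wrapped in a RetriesExhaustedException.
        table.put(put);
      } catch (IOException e) {
        // Back off and retry later; the region unblocks once its flush finishes.
      }
    }
  }
}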
2024-12-08T11:21:31,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-08T11:21:31,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-08T11:21:31,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-08T11:21:31,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9070 sec 2024-12-08T11:21:31,474 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.9110 sec 2024-12-08T11:21:31,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-08T11:21:31,667 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-08T11:21:31,669 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:31,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-08T11:21:31,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T11:21:31,671 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:31,671 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:31,671 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:31,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T11:21:31,823 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:31,823 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-08T11:21:31,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
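The pid=112 through pid=115 lines above trace a client-requested flush end to end: HMaster receives the flush RPC, stores a FlushTableProcedure, fans out a FlushRegionProcedure to the region server, and the client's HBaseAdmin future reports the operation complete. A minimal sketch of the client call that produces this sequence follows, assuming an ordinary Connection rather than the test's embedded mini-cluster; the class name is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table and blocks until the
      // resulting procedure (pid=112/114 in the log) reports completion.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}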
2024-12-08T11:21:31,824 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:21:31,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:31,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:31,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:31,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:31,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:31,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:31,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f646a1c98e404947bdd140bf566cabaa_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656890056/Put/seqid=0 2024-12-08T11:21:31,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742248_1424 (size=12304) 2024-12-08T11:21:31,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T11:21:32,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:32,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:32,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656952241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656952243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656952244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:32,268 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f646a1c98e404947bdd140bf566cabaa_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f646a1c98e404947bdd140bf566cabaa_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:32,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2830ed27edbf4dc483e08857e20dd2e8, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:32,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2830ed27edbf4dc483e08857e20dd2e8 is 175, key is test_row_0/A:col10/1733656890056/Put/seqid=0 2024-12-08T11:21:32,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T11:21:32,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742249_1425 (size=31105) 2024-12-08T11:21:32,348 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656952345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656952352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656952352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656952550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656952559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656952560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,674 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2830ed27edbf4dc483e08857e20dd2e8 2024-12-08T11:21:32,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/8ebe81c249a245c3babc4a742eceb332 is 50, key is test_row_0/B:col10/1733656890056/Put/seqid=0 2024-12-08T11:21:32,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742250_1426 (size=12151) 2024-12-08T11:21:32,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-08T11:21:32,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656952856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656952865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:32,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:32,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656952865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:33,085 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/8ebe81c249a245c3babc4a742eceb332 2024-12-08T11:21:33,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/64a818e188544ed5b2787121aa7d762d is 50, key is test_row_0/C:col10/1733656890056/Put/seqid=0 2024-12-08T11:21:33,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742251_1427 (size=12151) 2024-12-08T11:21:33,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:33,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656953361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:33,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:33,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656953373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:33,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:33,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656953373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:33,499 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/64a818e188544ed5b2787121aa7d762d 2024-12-08T11:21:33,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2830ed27edbf4dc483e08857e20dd2e8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2830ed27edbf4dc483e08857e20dd2e8 2024-12-08T11:21:33,510 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2830ed27edbf4dc483e08857e20dd2e8, entries=150, sequenceid=209, filesize=30.4 K 2024-12-08T11:21:33,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/8ebe81c249a245c3babc4a742eceb332 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8ebe81c249a245c3babc4a742eceb332 2024-12-08T11:21:33,515 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8ebe81c249a245c3babc4a742eceb332, entries=150, sequenceid=209, filesize=11.9 K
2024-12-08T11:21:33,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/64a818e188544ed5b2787121aa7d762d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/64a818e188544ed5b2787121aa7d762d
2024-12-08T11:21:33,519 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/64a818e188544ed5b2787121aa7d762d, entries=150, sequenceid=209, filesize=11.9 K
2024-12-08T11:21:33,520 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 548fe72b592216ea9ca6f0d238246b28 in 1696ms, sequenceid=209, compaction requested=true
2024-12-08T11:21:33,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28:
2024-12-08T11:21:33,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.
2024-12-08T11:21:33,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115
2024-12-08T11:21:33,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=115
2024-12-08T11:21:33,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114
2024-12-08T11:21:33,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8500 sec
2024-12-08T11:21:33,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 1.8530 sec
2024-12-08T11:21:33,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-12-08T11:21:33,774 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed
2024-12-08T11:21:33,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-08T11:21:33,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees
2024-12-08T11:21:33,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-12-08T11:21:33,777 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-08T11:21:33,778 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T11:21:33,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T11:21:33,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-12-08T11:21:33,930 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491
2024-12-08T11:21:33,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117
2024-12-08T11:21:33,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.
2024-12-08T11:21:33,930 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB
2024-12-08T11:21:33,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A
2024-12-08T11:21:33,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:21:33,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B
2024-12-08T11:21:33,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:21:33,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C
2024-12-08T11:21:33,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:21:33,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085529016f7aad4d16b771b7ad0cb5e738_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656892232/Put/seqid=0
2024-12-08T11:21:33,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742252_1428 (size=12304)
2024-12-08T11:21:34,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116
2024-12-08T11:21:34,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:21:34,350 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085529016f7aad4d16b771b7ad0cb5e738_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085529016f7aad4d16b771b7ad0cb5e738_548fe72b592216ea9ca6f0d238246b28
2024-12-08T11:21:34,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file:
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3732e55470574c1ea5661ee43c1c57c9, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:34,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3732e55470574c1ea5661ee43c1c57c9 is 175, key is test_row_0/A:col10/1733656892232/Put/seqid=0 2024-12-08T11:21:34,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742253_1429 (size=31105) 2024-12-08T11:21:34,371 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=233, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3732e55470574c1ea5661ee43c1c57c9 2024-12-08T11:21:34,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/753c26e452be4afe864f854446ab775b is 50, key is test_row_0/B:col10/1733656892232/Put/seqid=0 2024-12-08T11:21:34,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-08T11:21:34,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742254_1430 (size=12151) 2024-12-08T11:21:34,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:34,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:34,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656954399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656954400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,416 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656954406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656954508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656954510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656954517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52394 deadline: 1733656954532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,537 DEBUG [Thread-1660 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8176 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:34,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52328 deadline: 1733656954556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,562 DEBUG [Thread-1662 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8203 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:34,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656954712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656954717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:34,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656954722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:34,783 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/753c26e452be4afe864f854446ab775b 2024-12-08T11:21:34,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/4f06cafdfa764ef7b3dd65dc5e86c3cb is 50, key is test_row_0/C:col10/1733656892232/Put/seqid=0 2024-12-08T11:21:34,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742255_1431 (size=12151) 2024-12-08T11:21:34,802 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/4f06cafdfa764ef7b3dd65dc5e86c3cb 2024-12-08T11:21:34,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3732e55470574c1ea5661ee43c1c57c9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3732e55470574c1ea5661ee43c1c57c9 2024-12-08T11:21:34,809 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3732e55470574c1ea5661ee43c1c57c9, entries=150, sequenceid=233, filesize=30.4 K 2024-12-08T11:21:34,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/753c26e452be4afe864f854446ab775b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/753c26e452be4afe864f854446ab775b 2024-12-08T11:21:34,814 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/753c26e452be4afe864f854446ab775b, entries=150, sequenceid=233, filesize=11.9 K 2024-12-08T11:21:34,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/4f06cafdfa764ef7b3dd65dc5e86c3cb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/4f06cafdfa764ef7b3dd65dc5e86c3cb 2024-12-08T11:21:34,818 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/4f06cafdfa764ef7b3dd65dc5e86c3cb, entries=150, sequenceid=233, filesize=11.9 K 2024-12-08T11:21:34,819 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 548fe72b592216ea9ca6f0d238246b28 in 889ms, sequenceid=233, compaction requested=true 2024-12-08T11:21:34,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:34,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:34,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-12-08T11:21:34,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-12-08T11:21:34,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-08T11:21:34,822 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0430 sec 2024-12-08T11:21:34,823 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.0460 sec 2024-12-08T11:21:34,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-08T11:21:34,880 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-08T11:21:34,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:34,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-12-08T11:21:34,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T11:21:34,884 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:34,885 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:34,885 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:34,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T11:21:35,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:35,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T11:21:35,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:35,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:35,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:35,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:35,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:35,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:35,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208053a5bf78a0a4a419c5568b926e67795_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656895021/Put/seqid=0 2024-12-08T11:21:35,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742256_1432 (size=14794) 2024-12-08T11:21:35,034 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:35,036 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:35,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:35,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:35,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,040 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208053a5bf78a0a4a419c5568b926e67795_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208053a5bf78a0a4a419c5568b926e67795_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:35,040 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/819db1e0338d4724b086e1903958496a, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:35,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/819db1e0338d4724b086e1903958496a is 175, key is test_row_0/A:col10/1733656895021/Put/seqid=0 2024-12-08T11:21:35,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742257_1433 (size=39749) 2024-12-08T11:21:35,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656955076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,086 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656955081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656955084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T11:21:35,189 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,190 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:35,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:35,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656955185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656955187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,190 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656955188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:35,342 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:35,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:35,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,343 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:35,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656955391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656955392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656955392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,446 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=246, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/819db1e0338d4724b086e1903958496a 2024-12-08T11:21:35,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/8b70177a0429414ebe808db4cc699c32 is 50, key is test_row_0/B:col10/1733656895021/Put/seqid=0 2024-12-08T11:21:35,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742258_1434 (size=12151) 2024-12-08T11:21:35,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T11:21:35,495 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
as already flushing 2024-12-08T11:21:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,648 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,649 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656955700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656955701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,709 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:35,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656955701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,801 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,801 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:35,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:35,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,802 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/8b70177a0429414ebe808db4cc699c32 2024-12-08T11:21:35,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/39f6ce9a4239472e8ffbb038118221ed is 50, key is test_row_0/C:col10/1733656895021/Put/seqid=0 2024-12-08T11:21:35,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742259_1435 (size=12151) 2024-12-08T11:21:35,953 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:35,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:35,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:35,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:35,954 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:35,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T11:21:36,106 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:36,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:36,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:36,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:36,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:36,107 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:36,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:36,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:36,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:36,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656956210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:36,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:36,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656956213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:36,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:36,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656956213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:36,259 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:36,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:36,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:36,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:36,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:36,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:36,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:36,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:36,270 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/39f6ce9a4239472e8ffbb038118221ed 2024-12-08T11:21:36,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/819db1e0338d4724b086e1903958496a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/819db1e0338d4724b086e1903958496a 2024-12-08T11:21:36,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/819db1e0338d4724b086e1903958496a, entries=200, sequenceid=246, filesize=38.8 K 2024-12-08T11:21:36,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/8b70177a0429414ebe808db4cc699c32 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8b70177a0429414ebe808db4cc699c32 2024-12-08T11:21:36,282 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8b70177a0429414ebe808db4cc699c32, entries=150, sequenceid=246, filesize=11.9 K 2024-12-08T11:21:36,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/39f6ce9a4239472e8ffbb038118221ed as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/39f6ce9a4239472e8ffbb038118221ed 2024-12-08T11:21:36,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/39f6ce9a4239472e8ffbb038118221ed, entries=150, sequenceid=246, filesize=11.9 K 2024-12-08T11:21:36,287 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 548fe72b592216ea9ca6f0d238246b28 in 1265ms, sequenceid=246, compaction requested=true 2024-12-08T11:21:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:36,287 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-08T11:21:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:36,287 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:36,287 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-08T11:21:36,288 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 164545 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-08T11:21:36,288 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61131 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-08T11:21:36,289 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/A is initiating minor compaction (all files) 2024-12-08T11:21:36,289 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/B is initiating minor compaction (all files) 2024-12-08T11:21:36,289 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/B in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:36,289 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/A in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:36,289 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/30487f3a570040d9a4d932d0dd54467d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/ae1cff321a5048abab4e868871066409, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8ebe81c249a245c3babc4a742eceb332, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/753c26e452be4afe864f854446ab775b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8b70177a0429414ebe808db4cc699c32] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=59.7 K 2024-12-08T11:21:36,289 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4820afed988453ba11f9cfb9f0f703b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c0706121c6ec4895b8b6c0af9900dedb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2830ed27edbf4dc483e08857e20dd2e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3732e55470574c1ea5661ee43c1c57c9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/819db1e0338d4724b086e1903958496a] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=160.7 K 2024-12-08T11:21:36,289 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:36,289 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4820afed988453ba11f9cfb9f0f703b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c0706121c6ec4895b8b6c0af9900dedb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2830ed27edbf4dc483e08857e20dd2e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3732e55470574c1ea5661ee43c1c57c9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/819db1e0338d4724b086e1903958496a] 2024-12-08T11:21:36,289 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 30487f3a570040d9a4d932d0dd54467d, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733656888482 2024-12-08T11:21:36,289 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4820afed988453ba11f9cfb9f0f703b, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733656888482 2024-12-08T11:21:36,290 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ae1cff321a5048abab4e868871066409, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656888896 2024-12-08T11:21:36,290 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0706121c6ec4895b8b6c0af9900dedb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656888896 2024-12-08T11:21:36,290 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ebe81c249a245c3babc4a742eceb332, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733656890054 2024-12-08T11:21:36,290 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2830ed27edbf4dc483e08857e20dd2e8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733656890054 2024-12-08T11:21:36,290 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 753c26e452be4afe864f854446ab775b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733656892232 2024-12-08T11:21:36,290 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3732e55470574c1ea5661ee43c1c57c9, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733656892232 2024-12-08T11:21:36,291 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b70177a0429414ebe808db4cc699c32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733656894395 2024-12-08T11:21:36,291 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 819db1e0338d4724b086e1903958496a, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=246, 
earliestPutTs=1733656894387 2024-12-08T11:21:36,302 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:36,310 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208cb7bf7757bfe49bd9959f55d16d8a813_548fe72b592216ea9ca6f0d238246b28 store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:36,312 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#B#compaction#370 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:36,313 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/3fd6724fc03042c0af6fe66b6bb6d896 is 50, key is test_row_0/B:col10/1733656895021/Put/seqid=0 2024-12-08T11:21:36,313 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208cb7bf7757bfe49bd9959f55d16d8a813_548fe72b592216ea9ca6f0d238246b28, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:36,313 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208cb7bf7757bfe49bd9959f55d16d8a813_548fe72b592216ea9ca6f0d238246b28 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:36,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742260_1436 (size=4469) 2024-12-08T11:21:36,326 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#A#compaction#369 average throughput is 1.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:36,326 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2f3aba623c804263bf6d533d03a660bc is 175, key is test_row_0/A:col10/1733656895021/Put/seqid=0 2024-12-08T11:21:36,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742261_1437 (size=12697) 2024-12-08T11:21:36,340 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/3fd6724fc03042c0af6fe66b6bb6d896 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/3fd6724fc03042c0af6fe66b6bb6d896 2024-12-08T11:21:36,349 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/B of 548fe72b592216ea9ca6f0d238246b28 into 3fd6724fc03042c0af6fe66b6bb6d896(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:36,349 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:36,349 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/B, priority=11, startTime=1733656896287; duration=0sec 2024-12-08T11:21:36,349 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:36,349 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:B 2024-12-08T11:21:36,349 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-08T11:21:36,353 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61131 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-08T11:21:36,353 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/C is initiating minor compaction (all files) 2024-12-08T11:21:36,353 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/C in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:36,354 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e70b5ccbb4c24a1b99bac558917a7b58, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/5a1a30aafbeb425fb182ab568c82f97e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/64a818e188544ed5b2787121aa7d762d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/4f06cafdfa764ef7b3dd65dc5e86c3cb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/39f6ce9a4239472e8ffbb038118221ed] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=59.7 K 2024-12-08T11:21:36,354 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting e70b5ccbb4c24a1b99bac558917a7b58, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1733656888482 2024-12-08T11:21:36,354 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a1a30aafbeb425fb182ab568c82f97e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656888896 2024-12-08T11:21:36,354 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 64a818e188544ed5b2787121aa7d762d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1733656890054 2024-12-08T11:21:36,355 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f06cafdfa764ef7b3dd65dc5e86c3cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1733656892232 2024-12-08T11:21:36,355 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 39f6ce9a4239472e8ffbb038118221ed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733656894395 2024-12-08T11:21:36,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742262_1438 (size=31651) 2024-12-08T11:21:36,368 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/2f3aba623c804263bf6d533d03a660bc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2f3aba623c804263bf6d533d03a660bc 2024-12-08T11:21:36,374 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/A of 548fe72b592216ea9ca6f0d238246b28 into 2f3aba623c804263bf6d533d03a660bc(size=30.9 K), total size for store is 30.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:36,374 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:36,374 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/A, priority=11, startTime=1733656896287; duration=0sec 2024-12-08T11:21:36,375 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:36,375 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:A 2024-12-08T11:21:36,375 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#C#compaction#371 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:36,376 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/e8e6ce54ef0248ed9f2de57aca86a143 is 50, key is test_row_0/C:col10/1733656895021/Put/seqid=0 2024-12-08T11:21:36,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742263_1439 (size=12697) 2024-12-08T11:21:36,389 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/e8e6ce54ef0248ed9f2de57aca86a143 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e8e6ce54ef0248ed9f2de57aca86a143 2024-12-08T11:21:36,396 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/C of 548fe72b592216ea9ca6f0d238246b28 into e8e6ce54ef0248ed9f2de57aca86a143(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:36,396 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:36,396 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/C, priority=11, startTime=1733656896287; duration=0sec 2024-12-08T11:21:36,396 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:36,396 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:C 2024-12-08T11:21:36,412 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:36,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-12-08T11:21:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:36,413 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T11:21:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:36,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:36,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208878ab0da4cea418381159a32018f9321_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656895071/Put/seqid=0 2024-12-08T11:21:36,429 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742264_1440 (size=12454) 2024-12-08T11:21:36,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:36,837 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208878ab0da4cea418381159a32018f9321_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208878ab0da4cea418381159a32018f9321_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:36,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/b3ed9af63b6e46548c9f6d495f37b370, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:36,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/b3ed9af63b6e46548c9f6d495f37b370 is 175, key is test_row_0/A:col10/1733656895071/Put/seqid=0 2024-12-08T11:21:36,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742265_1441 (size=31255) 2024-12-08T11:21:36,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T11:21:37,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:37,229 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:37,243 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=272, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/b3ed9af63b6e46548c9f6d495f37b370 2024-12-08T11:21:37,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656957242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656957243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656957244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/402b4dc722d242e5bca1e32ca37847ee is 50, key is test_row_0/B:col10/1733656895071/Put/seqid=0 2024-12-08T11:21:37,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742266_1442 (size=12301) 2024-12-08T11:21:37,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656957349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656957349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656957350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656957554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656957553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656957557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,661 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/402b4dc722d242e5bca1e32ca37847ee 2024-12-08T11:21:37,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/f18a94f7e541492ab08010002e763fe8 is 50, key is test_row_0/C:col10/1733656895071/Put/seqid=0 2024-12-08T11:21:37,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742267_1443 (size=12301) 2024-12-08T11:21:37,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656957859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656957860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:37,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:37,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656957861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,073 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/f18a94f7e541492ab08010002e763fe8 2024-12-08T11:21:38,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/b3ed9af63b6e46548c9f6d495f37b370 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/b3ed9af63b6e46548c9f6d495f37b370 2024-12-08T11:21:38,082 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/b3ed9af63b6e46548c9f6d495f37b370, entries=150, sequenceid=272, filesize=30.5 K 2024-12-08T11:21:38,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/402b4dc722d242e5bca1e32ca37847ee as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/402b4dc722d242e5bca1e32ca37847ee 2024-12-08T11:21:38,087 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/402b4dc722d242e5bca1e32ca37847ee, entries=150, sequenceid=272, filesize=12.0 K 2024-12-08T11:21:38,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/f18a94f7e541492ab08010002e763fe8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/f18a94f7e541492ab08010002e763fe8 2024-12-08T11:21:38,092 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/f18a94f7e541492ab08010002e763fe8, entries=150, sequenceid=272, filesize=12.0 K 2024-12-08T11:21:38,093 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 548fe72b592216ea9ca6f0d238246b28 in 1680ms, sequenceid=272, compaction requested=false 2024-12-08T11:21:38,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:38,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:38,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-12-08T11:21:38,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-12-08T11:21:38,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-08T11:21:38,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2100 sec 2024-12-08T11:21:38,097 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 3.2150 sec 2024-12-08T11:21:38,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:38,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T11:21:38,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:38,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:38,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:38,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:38,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:38,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:38,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120840b63bf8c2744b0f8ea831fc56817bad_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656898367/Put/seqid=0 2024-12-08T11:21:38,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742268_1444 (size=17534) 2024-12-08T11:21:38,399 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:38,403 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120840b63bf8c2744b0f8ea831fc56817bad_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120840b63bf8c2744b0f8ea831fc56817bad_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:38,404 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/7413f258b8524153acd6bd8bdda916dd, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:38,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/7413f258b8524153acd6bd8bdda916dd is 175, key is test_row_0/A:col10/1733656898367/Put/seqid=0 2024-12-08T11:21:38,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742269_1445 (size=48639) 2024-12-08T11:21:38,413 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=287, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/7413f258b8524153acd6bd8bdda916dd 2024-12-08T11:21:38,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656958409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,421 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/c24c0db857bf4580a1e69e66f1e3ac12 is 50, key is test_row_0/B:col10/1733656898367/Put/seqid=0 2024-12-08T11:21:38,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656958417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656958418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742270_1446 (size=12301) 2024-12-08T11:21:38,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/c24c0db857bf4580a1e69e66f1e3ac12 2024-12-08T11:21:38,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/ca436db2ca2d4b10b7fd51ccad23b300 is 50, key is test_row_0/C:col10/1733656898367/Put/seqid=0 2024-12-08T11:21:38,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742271_1447 (size=12301) 2024-12-08T11:21:38,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/ca436db2ca2d4b10b7fd51ccad23b300 2024-12-08T11:21:38,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/7413f258b8524153acd6bd8bdda916dd as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/7413f258b8524153acd6bd8bdda916dd 2024-12-08T11:21:38,455 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/7413f258b8524153acd6bd8bdda916dd, entries=250, sequenceid=287, filesize=47.5 K 2024-12-08T11:21:38,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/c24c0db857bf4580a1e69e66f1e3ac12 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/c24c0db857bf4580a1e69e66f1e3ac12 2024-12-08T11:21:38,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/c24c0db857bf4580a1e69e66f1e3ac12, entries=150, sequenceid=287, filesize=12.0 K 2024-12-08T11:21:38,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/ca436db2ca2d4b10b7fd51ccad23b300 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/ca436db2ca2d4b10b7fd51ccad23b300 2024-12-08T11:21:38,464 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/ca436db2ca2d4b10b7fd51ccad23b300, entries=150, sequenceid=287, filesize=12.0 K 2024-12-08T11:21:38,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 548fe72b592216ea9ca6f0d238246b28 in 95ms, sequenceid=287, compaction requested=true 2024-12-08T11:21:38,464 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:38,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:38,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:38,465 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:38,465 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:38,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:B, priority=-2147483648, current under compaction 
store size is 2 2024-12-08T11:21:38,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:38,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:38,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:38,466 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:38,466 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/B is initiating minor compaction (all files) 2024-12-08T11:21:38,466 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/B in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:38,466 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/3fd6724fc03042c0af6fe66b6bb6d896, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/402b4dc722d242e5bca1e32ca37847ee, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/c24c0db857bf4580a1e69e66f1e3ac12] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=36.4 K 2024-12-08T11:21:38,466 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111545 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:38,466 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/A is initiating minor compaction (all files) 2024-12-08T11:21:38,466 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/A in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:38,466 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2f3aba623c804263bf6d533d03a660bc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/b3ed9af63b6e46548c9f6d495f37b370, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/7413f258b8524153acd6bd8bdda916dd] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=108.9 K 2024-12-08T11:21:38,466 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:38,466 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2f3aba623c804263bf6d533d03a660bc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/b3ed9af63b6e46548c9f6d495f37b370, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/7413f258b8524153acd6bd8bdda916dd] 2024-12-08T11:21:38,467 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fd6724fc03042c0af6fe66b6bb6d896, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733656894395 2024-12-08T11:21:38,467 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f3aba623c804263bf6d533d03a660bc, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733656894395 2024-12-08T11:21:38,478 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 402b4dc722d242e5bca1e32ca37847ee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733656895071 2024-12-08T11:21:38,478 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting b3ed9af63b6e46548c9f6d495f37b370, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733656895071 2024-12-08T11:21:38,479 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c24c0db857bf4580a1e69e66f1e3ac12, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733656897240 2024-12-08T11:21:38,479 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7413f258b8524153acd6bd8bdda916dd, keycount=250, bloomtype=ROW, size=47.5 
K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733656897235 2024-12-08T11:21:38,490 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#B#compaction#378 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:38,490 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/fd55ca44153d47f4a5dc61680b6995d8 is 50, key is test_row_0/B:col10/1733656898367/Put/seqid=0 2024-12-08T11:21:38,496 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:38,500 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208307a31f4f7324fd7bae8dec0ac714059_548fe72b592216ea9ca6f0d238246b28 store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:38,501 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208307a31f4f7324fd7bae8dec0ac714059_548fe72b592216ea9ca6f0d238246b28, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:38,502 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208307a31f4f7324fd7bae8dec0ac714059_548fe72b592216ea9ca6f0d238246b28 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:38,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742272_1448 (size=12949) 2024-12-08T11:21:38,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:38,522 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T11:21:38,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:38,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:38,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:38,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:38,522 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:38,522 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:38,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742273_1449 (size=4469) 2024-12-08T11:21:38,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b5a43e448c714bd6903c47e4a859465b_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656898520/Put/seqid=0 2024-12-08T11:21:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742274_1450 (size=14994) 2024-12-08T11:21:38,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656958552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656958553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656958553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656958654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656958658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,663 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656958663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,862 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656958859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656958864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:38,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656958864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:38,918 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/fd55ca44153d47f4a5dc61680b6995d8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/fd55ca44153d47f4a5dc61680b6995d8 2024-12-08T11:21:38,922 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/B of 548fe72b592216ea9ca6f0d238246b28 into fd55ca44153d47f4a5dc61680b6995d8(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:38,922 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:38,922 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/B, priority=13, startTime=1733656898465; duration=0sec 2024-12-08T11:21:38,922 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:38,922 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:B 2024-12-08T11:21:38,922 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:38,923 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:38,923 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/C is initiating minor compaction (all files) 2024-12-08T11:21:38,923 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/C in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:38,923 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e8e6ce54ef0248ed9f2de57aca86a143, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/f18a94f7e541492ab08010002e763fe8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/ca436db2ca2d4b10b7fd51ccad23b300] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=36.4 K 2024-12-08T11:21:38,923 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting e8e6ce54ef0248ed9f2de57aca86a143, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1733656894395 2024-12-08T11:21:38,924 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f18a94f7e541492ab08010002e763fe8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733656895071 2024-12-08T11:21:38,924 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ca436db2ca2d4b10b7fd51ccad23b300, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733656897240 2024-12-08T11:21:38,925 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
548fe72b592216ea9ca6f0d238246b28#A#compaction#379 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:38,926 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/8f8c6fc0bad54ca5931881b74958cf0b is 175, key is test_row_0/A:col10/1733656898367/Put/seqid=0 2024-12-08T11:21:38,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742275_1451 (size=31903) 2024-12-08T11:21:38,933 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:38,937 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b5a43e448c714bd6903c47e4a859465b_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b5a43e448c714bd6903c47e4a859465b_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:38,937 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/6fab4af9c98f4076a15fadbb6ecb380e, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:38,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/6fab4af9c98f4076a15fadbb6ecb380e is 175, key is test_row_0/A:col10/1733656898520/Put/seqid=0 2024-12-08T11:21:38,941 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#C#compaction#381 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:38,941 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/0bc604d95cf24d59a2abd70b04e293bc is 50, key is test_row_0/C:col10/1733656898367/Put/seqid=0 2024-12-08T11:21:38,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742276_1452 (size=39949) 2024-12-08T11:21:38,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742277_1453 (size=12949) 2024-12-08T11:21:38,974 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/0bc604d95cf24d59a2abd70b04e293bc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/0bc604d95cf24d59a2abd70b04e293bc 2024-12-08T11:21:38,978 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/C of 548fe72b592216ea9ca6f0d238246b28 into 0bc604d95cf24d59a2abd70b04e293bc(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:38,978 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:38,978 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/C, priority=13, startTime=1733656898465; duration=0sec 2024-12-08T11:21:38,978 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:38,978 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:C 2024-12-08T11:21:38,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-12-08T11:21:38,988 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-12-08T11:21:38,991 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:38,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees 2024-12-08T11:21:38,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T11:21:38,993 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:38,993 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:38,993 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:39,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T11:21:39,145 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,145 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T11:21:39,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:39,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,146 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:39,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656959163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:39,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656959173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:39,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656959173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T11:21:39,298 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,298 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T11:21:39,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:39,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,299 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:39,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,336 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/8f8c6fc0bad54ca5931881b74958cf0b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/8f8c6fc0bad54ca5931881b74958cf0b 2024-12-08T11:21:39,341 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/A of 548fe72b592216ea9ca6f0d238246b28 into 8f8c6fc0bad54ca5931881b74958cf0b(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:39,341 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:39,341 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/A, priority=13, startTime=1733656898465; duration=0sec 2024-12-08T11:21:39,341 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:39,341 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:A 2024-12-08T11:21:39,358 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=309, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/6fab4af9c98f4076a15fadbb6ecb380e 2024-12-08T11:21:39,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/aee09a3ba17844d5a51e20f3308fc273 is 50, key is test_row_0/B:col10/1733656898520/Put/seqid=0 2024-12-08T11:21:39,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742278_1454 (size=12301) 2024-12-08T11:21:39,451 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,451 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T11:21:39,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:39,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:39,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,452 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:39,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T11:21:39,604 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T11:21:39,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:39,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:39,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:39,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:39,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656959672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:39,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656959682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:39,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656959682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,757 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,757 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T11:21:39,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:39,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,758 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,769 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/aee09a3ba17844d5a51e20f3308fc273 2024-12-08T11:21:39,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/72c137688843440eb28753098137fde6 is 50, key is test_row_0/C:col10/1733656898520/Put/seqid=0 2024-12-08T11:21:39,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742279_1455 (size=12301) 2024-12-08T11:21:39,910 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:39,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T11:21:39,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:39,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:39,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:39,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:40,062 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:40,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T11:21:40,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:40,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:40,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:40,063 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:40,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:40,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T11:21:40,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/72c137688843440eb28753098137fde6 2024-12-08T11:21:40,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/6fab4af9c98f4076a15fadbb6ecb380e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/6fab4af9c98f4076a15fadbb6ecb380e 2024-12-08T11:21:40,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/6fab4af9c98f4076a15fadbb6ecb380e, entries=200, sequenceid=309, filesize=39.0 K 2024-12-08T11:21:40,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/aee09a3ba17844d5a51e20f3308fc273 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/aee09a3ba17844d5a51e20f3308fc273 2024-12-08T11:21:40,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/aee09a3ba17844d5a51e20f3308fc273, entries=150, sequenceid=309, filesize=12.0 K 2024-12-08T11:21:40,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/72c137688843440eb28753098137fde6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/72c137688843440eb28753098137fde6 2024-12-08T11:21:40,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/72c137688843440eb28753098137fde6, entries=150, sequenceid=309, filesize=12.0 K 2024-12-08T11:21:40,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 548fe72b592216ea9ca6f0d238246b28 in 1677ms, sequenceid=309, compaction requested=false 2024-12-08T11:21:40,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:40,215 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:40,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-12-08T11:21:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:40,216 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T11:21:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:40,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:40,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c8dacaed799a4fd6933d617ee8d591e0_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656898546/Put/seqid=0 2024-12-08T11:21:40,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742280_1456 (size=12454) 2024-12-08T11:21:40,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:40,635 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c8dacaed799a4fd6933d617ee8d591e0_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c8dacaed799a4fd6933d617ee8d591e0_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:40,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/bbabbf4a00af4b358361c030eaee97c8, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:40,637 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/bbabbf4a00af4b358361c030eaee97c8 is 175, key is test_row_0/A:col10/1733656898546/Put/seqid=0 2024-12-08T11:21:40,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742281_1457 (size=31255) 2024-12-08T11:21:40,659 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=327, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/bbabbf4a00af4b358361c030eaee97c8 2024-12-08T11:21:40,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/83eb2d1a3ee74faeac4b17484cafbb91 is 50, key is test_row_0/B:col10/1733656898546/Put/seqid=0 2024-12-08T11:21:40,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742282_1458 (size=12301) 2024-12-08T11:21:40,675 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/83eb2d1a3ee74faeac4b17484cafbb91 2024-12-08T11:21:40,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b5e35381fee542fd9d6df33b476f1813 is 50, key is test_row_0/C:col10/1733656898546/Put/seqid=0 2024-12-08T11:21:40,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:40,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
as already flushing 2024-12-08T11:21:40,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742283_1459 (size=12301) 2024-12-08T11:21:40,697 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b5e35381fee542fd9d6df33b476f1813 2024-12-08T11:21:40,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/bbabbf4a00af4b358361c030eaee97c8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bbabbf4a00af4b358361c030eaee97c8 2024-12-08T11:21:40,710 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bbabbf4a00af4b358361c030eaee97c8, entries=150, sequenceid=327, filesize=30.5 K 2024-12-08T11:21:40,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/83eb2d1a3ee74faeac4b17484cafbb91 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/83eb2d1a3ee74faeac4b17484cafbb91 2024-12-08T11:21:40,715 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/83eb2d1a3ee74faeac4b17484cafbb91, entries=150, sequenceid=327, filesize=12.0 K 2024-12-08T11:21:40,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/b5e35381fee542fd9d6df33b476f1813 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b5e35381fee542fd9d6df33b476f1813 2024-12-08T11:21:40,719 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b5e35381fee542fd9d6df33b476f1813, entries=150, sequenceid=327, filesize=12.0 K 2024-12-08T11:21:40,720 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 
{event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=80.51 KB/82440 for 548fe72b592216ea9ca6f0d238246b28 in 504ms, sequenceid=327, compaction requested=true 2024-12-08T11:21:40,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:40,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:40,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:40,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-08T11:21:40,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-08T11:21:40,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-12-08T11:21:40,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:40,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:40,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:40,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:40,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:40,721 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:40,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-12-08T11:21:40,723 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7280 sec 2024-12-08T11:21:40,725 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 1.7320 sec 2024-12-08T11:21:40,728 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f8bbc039d3ea473387de29fdbd9b10bd_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656900713/Put/seqid=0 2024-12-08T11:21:40,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742284_1460 (size=17534) 2024-12-08T11:21:40,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:40,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656960758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:40,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:40,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656960760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:40,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:40,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656960761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:40,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:40,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656960865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:40,871 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:40,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656960867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:40,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:40,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656960868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:41,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656961071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:41,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656961072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:41,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656961073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-08T11:21:41,096 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-08T11:21:41,098 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:41,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees 2024-12-08T11:21:41,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T11:21:41,099 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:41,100 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=122, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:41,100 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:41,133 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:41,137 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f8bbc039d3ea473387de29fdbd9b10bd_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f8bbc039d3ea473387de29fdbd9b10bd_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:41,138 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/82f3960a6bf24e9fb029831efffc7fc6, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:41,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/82f3960a6bf24e9fb029831efffc7fc6 is 175, key is test_row_0/A:col10/1733656900713/Put/seqid=0 2024-12-08T11:21:41,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742285_1461 (size=48639) 2024-12-08T11:21:41,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T11:21:41,251 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,252 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-08T11:21:41,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:41,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,252 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:41,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:41,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:41,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:41,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656961375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:41,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656961376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:41,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656961380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T11:21:41,405 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,405 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-08T11:21:41,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:41,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
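
[Editor's note] The repeated pid=123 failures above are the expected collision between the master-driven flush procedure and the MemStoreFlusher that is already flushing this region: FlushRegionCallable sees "NOT flushing ... as already flushing", throws "Unable to complete flush", and the master re-dispatches the subprocedure until the region frees up. For context, a minimal sketch (hypothetical class name, standard HBase 2.x client API assumed) of how a table flush like pid=120/122 in this log is typically requested from the client side:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master runs a
      // FlushTableProcedure with one FlushRegionProcedure per region, which is
      // presumably how the "Client=jenkins ... flush TestAcidGuarantees" entries
      // above were produced.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

Admin.flush blocks until the procedure finishes, which matches the "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed" line earlier in this stretch.
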
2024-12-08T11:21:41,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:41,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:41,543 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=344, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/82f3960a6bf24e9fb029831efffc7fc6 2024-12-08T11:21:41,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/4e7dd9269da24324841cfb0bbe2a9954 is 50, key is test_row_0/B:col10/1733656900713/Put/seqid=0 2024-12-08T11:21:41,557 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,558 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-08T11:21:41,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742286_1462 (size=12301) 2024-12-08T11:21:41,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:41,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] handler.RSProcedureHandler(58): pid=123 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
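
[Editor's note] The RegionTooBusyException warnings that pepper this stretch are the other side of the same in-progress flush: once the region's memstore passes its blocking limit (512.0 K here, normally the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier), HRegion.checkResources rejects new mutations until the flush drains the memstore. A minimal, hypothetical writer sketch that backs off on that exception; whether the exception reaches the caller directly or arrives wrapped by the client's own retries depends on settings such as hbase.client.retries.number:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                        // hypothetical starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);                          // same RPC path as RSRpcServices.mutate above
          break;
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit; wait for the flush to drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
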
2024-12-08T11:21:41,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=123 java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:41,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=123 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:41,560 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/4e7dd9269da24324841cfb0bbe2a9954 2024-12-08T11:21:41,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/36908adfd3264385bad0a128df6c85c4 is 50, key is test_row_0/C:col10/1733656900713/Put/seqid=0 2024-12-08T11:21:41,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742287_1463 (size=12301) 2024-12-08T11:21:41,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=344 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/36908adfd3264385bad0a128df6c85c4 2024-12-08T11:21:41,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/82f3960a6bf24e9fb029831efffc7fc6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/82f3960a6bf24e9fb029831efffc7fc6 2024-12-08T11:21:41,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/82f3960a6bf24e9fb029831efffc7fc6, entries=250, sequenceid=344, filesize=47.5 K 2024-12-08T11:21:41,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/4e7dd9269da24324841cfb0bbe2a9954 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/4e7dd9269da24324841cfb0bbe2a9954 2024-12-08T11:21:41,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/4e7dd9269da24324841cfb0bbe2a9954, entries=150, sequenceid=344, filesize=12.0 K 2024-12-08T11:21:41,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/36908adfd3264385bad0a128df6c85c4 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/36908adfd3264385bad0a128df6c85c4 2024-12-08T11:21:41,605 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/36908adfd3264385bad0a128df6c85c4, entries=150, sequenceid=344, filesize=12.0 K 2024-12-08T11:21:41,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 548fe72b592216ea9ca6f0d238246b28 in 885ms, sequenceid=344, compaction requested=true 2024-12-08T11:21:41,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:41,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:41,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:41,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:41,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:41,606 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:41,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:41,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T11:21:41,606 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:41,608 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:41,608 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 151746 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:41,608 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/A is initiating minor compaction (all files) 2024-12-08T11:21:41,608 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/B is initiating minor compaction (all files) 2024-12-08T11:21:41,608 INFO 
[RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/A in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,608 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/B in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,609 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/fd55ca44153d47f4a5dc61680b6995d8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/aee09a3ba17844d5a51e20f3308fc273, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/83eb2d1a3ee74faeac4b17484cafbb91, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/4e7dd9269da24324841cfb0bbe2a9954] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=48.7 K 2024-12-08T11:21:41,609 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/8f8c6fc0bad54ca5931881b74958cf0b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/6fab4af9c98f4076a15fadbb6ecb380e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bbabbf4a00af4b358361c030eaee97c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/82f3960a6bf24e9fb029831efffc7fc6] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=148.2 K 2024-12-08T11:21:41,609 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,609 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/8f8c6fc0bad54ca5931881b74958cf0b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/6fab4af9c98f4076a15fadbb6ecb380e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bbabbf4a00af4b358361c030eaee97c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/82f3960a6bf24e9fb029831efffc7fc6] 2024-12-08T11:21:41,609 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting fd55ca44153d47f4a5dc61680b6995d8, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733656897240 2024-12-08T11:21:41,609 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8f8c6fc0bad54ca5931881b74958cf0b, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733656897240 2024-12-08T11:21:41,609 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting aee09a3ba17844d5a51e20f3308fc273, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733656898415 2024-12-08T11:21:41,610 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6fab4af9c98f4076a15fadbb6ecb380e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733656898408 2024-12-08T11:21:41,610 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 83eb2d1a3ee74faeac4b17484cafbb91, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733656898544 2024-12-08T11:21:41,610 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e7dd9269da24324841cfb0bbe2a9954, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733656900711 2024-12-08T11:21:41,610 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbabbf4a00af4b358361c030eaee97c8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733656898544 2024-12-08T11:21:41,610 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82f3960a6bf24e9fb029831efffc7fc6, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733656900699 2024-12-08T11:21:41,621 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:41,621 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#B#compaction#390 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:41,622 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/7fb409763f6c4245801ab51b1d611ef9 is 50, key is test_row_0/B:col10/1733656900713/Put/seqid=0 2024-12-08T11:21:41,623 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412089ace292bab45429b84c91c44801e5041_548fe72b592216ea9ca6f0d238246b28 store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:41,625 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412089ace292bab45429b84c91c44801e5041_548fe72b592216ea9ca6f0d238246b28, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:41,625 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412089ace292bab45429b84c91c44801e5041_548fe72b592216ea9ca6f0d238246b28 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:41,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742288_1464 (size=13085) 2024-12-08T11:21:41,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742289_1465 (size=4469) 2024-12-08T11:21:41,644 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#A#compaction#391 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:41,645 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/29819a9bd04b445f8d61443da8907e19 is 175, key is test_row_0/A:col10/1733656900713/Put/seqid=0 2024-12-08T11:21:41,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742290_1466 (size=32039) 2024-12-08T11:21:41,668 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/29819a9bd04b445f8d61443da8907e19 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/29819a9bd04b445f8d61443da8907e19 2024-12-08T11:21:41,672 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/A of 548fe72b592216ea9ca6f0d238246b28 into 29819a9bd04b445f8d61443da8907e19(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:41,673 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:41,673 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/A, priority=12, startTime=1733656901605; duration=0sec 2024-12-08T11:21:41,673 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:41,673 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:A 2024-12-08T11:21:41,673 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:21:41,674 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:21:41,674 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/C is initiating minor compaction (all files) 2024-12-08T11:21:41,674 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/C in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:41,674 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/0bc604d95cf24d59a2abd70b04e293bc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/72c137688843440eb28753098137fde6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b5e35381fee542fd9d6df33b476f1813, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/36908adfd3264385bad0a128df6c85c4] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=48.7 K 2024-12-08T11:21:41,674 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bc604d95cf24d59a2abd70b04e293bc, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1733656897240 2024-12-08T11:21:41,674 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72c137688843440eb28753098137fde6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1733656898415 2024-12-08T11:21:41,675 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5e35381fee542fd9d6df33b476f1813, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1733656898544 2024-12-08T11:21:41,675 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36908adfd3264385bad0a128df6c85c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733656900711 2024-12-08T11:21:41,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T11:21:41,706 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#C#compaction#392 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:41,707 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/95d99684e6b2441289c505a2bca9f395 is 50, key is test_row_0/C:col10/1733656900713/Put/seqid=0 2024-12-08T11:21:41,711 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=123 2024-12-08T11:21:41,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:41,712 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T11:21:41,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:41,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:41,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:41,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:41,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:41,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:41,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742291_1467 (size=13085) 2024-12-08T11:21:41,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208efab91a4d8544cc1a8e0f10041ce3280_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656900755/Put/seqid=0 2024-12-08T11:21:41,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742292_1468 (size=12454) 2024-12-08T11:21:41,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:41,760 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208efab91a4d8544cc1a8e0f10041ce3280_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208efab91a4d8544cc1a8e0f10041ce3280_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:41,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/d4ebc3b91abc4d919eccd0590725f3c4, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:41,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/d4ebc3b91abc4d919eccd0590725f3c4 is 175, key is test_row_0/A:col10/1733656900755/Put/seqid=0 2024-12-08T11:21:41,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742293_1469 (size=31255) 2024-12-08T11:21:41,771 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/d4ebc3b91abc4d919eccd0590725f3c4 2024-12-08T11:21:41,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/cd85a8eaab16433dbed9b74983debae2 is 50, key is test_row_0/B:col10/1733656900755/Put/seqid=0 2024-12-08T11:21:41,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742294_1470 (size=12301) 2024-12-08T11:21:41,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:41,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. as already flushing 2024-12-08T11:21:41,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:41,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:41,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656961912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656961913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:41,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:41,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656961914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:42,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656962020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656962020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,026 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:42,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656962020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,042 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/7fb409763f6c4245801ab51b1d611ef9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/7fb409763f6c4245801ab51b1d611ef9 2024-12-08T11:21:42,046 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/B of 548fe72b592216ea9ca6f0d238246b28 into 7fb409763f6c4245801ab51b1d611ef9(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:42,046 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:42,046 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/B, priority=12, startTime=1733656901606; duration=0sec 2024-12-08T11:21:42,046 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:42,046 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:B 2024-12-08T11:21:42,118 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/95d99684e6b2441289c505a2bca9f395 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/95d99684e6b2441289c505a2bca9f395 2024-12-08T11:21:42,122 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/C of 548fe72b592216ea9ca6f0d238246b28 into 95d99684e6b2441289c505a2bca9f395(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:42,122 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:42,122 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/C, priority=12, startTime=1733656901606; duration=0sec 2024-12-08T11:21:42,122 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:42,122 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:C 2024-12-08T11:21:42,134 DEBUG [Thread-1675 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:63801 2024-12-08T11:21:42,134 DEBUG [Thread-1675 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:42,138 DEBUG [Thread-1671 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5aee939b to 127.0.0.1:63801 2024-12-08T11:21:42,138 DEBUG [Thread-1671 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:42,139 DEBUG [Thread-1677 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:63801 2024-12-08T11:21:42,140 DEBUG [Thread-1669 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167a78b0 to 127.0.0.1:63801 2024-12-08T11:21:42,140 DEBUG [Thread-1677 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:42,140 DEBUG [Thread-1669 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:42,143 DEBUG [Thread-1673 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:63801 2024-12-08T11:21:42,143 DEBUG [Thread-1673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:42,198 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/cd85a8eaab16433dbed9b74983debae2 2024-12-08T11:21:42,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T11:21:42,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/e24363e9023445fdabeed022f7afdec7 is 50, key is test_row_0/C:col10/1733656900755/Put/seqid=0 2024-12-08T11:21:42,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742295_1471 (size=12301) 2024-12-08T11:21:42,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:42,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656962226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656962226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,227 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:42,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656962227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:42,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52380 deadline: 1733656962528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:42,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52352 deadline: 1733656962529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:42,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52362 deadline: 1733656962530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:42,607 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/e24363e9023445fdabeed022f7afdec7 2024-12-08T11:21:42,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/d4ebc3b91abc4d919eccd0590725f3c4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4ebc3b91abc4d919eccd0590725f3c4 2024-12-08T11:21:42,613 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4ebc3b91abc4d919eccd0590725f3c4, entries=150, sequenceid=364, filesize=30.5 K 2024-12-08T11:21:42,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/cd85a8eaab16433dbed9b74983debae2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/cd85a8eaab16433dbed9b74983debae2 2024-12-08T11:21:42,617 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/cd85a8eaab16433dbed9b74983debae2, entries=150, sequenceid=364, filesize=12.0 K 2024-12-08T11:21:42,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/e24363e9023445fdabeed022f7afdec7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e24363e9023445fdabeed022f7afdec7 2024-12-08T11:21:42,620 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e24363e9023445fdabeed022f7afdec7, entries=150, sequenceid=364, filesize=12.0 K 2024-12-08T11:21:42,620 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 548fe72b592216ea9ca6f0d238246b28 in 908ms, sequenceid=364, compaction requested=false 2024-12-08T11:21:42,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:42,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:42,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-08T11:21:42,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=123 2024-12-08T11:21:42,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-08T11:21:42,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5210 sec 2024-12-08T11:21:42,623 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=122, table=TestAcidGuarantees in 1.5240 sec 2024-12-08T11:21:43,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:43,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T11:21:43,034 DEBUG [Thread-1666 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:63801 2024-12-08T11:21:43,034 DEBUG [Thread-1658 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b976e1a to 127.0.0.1:63801 2024-12-08T11:21:43,034 DEBUG [Thread-1666 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:43,034 DEBUG [Thread-1658 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:43,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:43,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:43,034 DEBUG [Thread-1664 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:63801 2024-12-08T11:21:43,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:43,035 DEBUG [Thread-1664 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:43,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:43,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:43,035 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:43,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087734aa11c4cd4fcb931f872edf55538e_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_0/A:col10/1733656901912/Put/seqid=0 2024-12-08T11:21:43,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742296_1472 (size=12454) 2024-12-08T11:21:43,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-12-08T11:21:43,203 INFO [Thread-1668 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-12-08T11:21:43,443 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:43,447 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087734aa11c4cd4fcb931f872edf55538e_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087734aa11c4cd4fcb931f872edf55538e_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:43,447 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3c4b946b481e4af1aaab530cf4cde9ea, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:43,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3c4b946b481e4af1aaab530cf4cde9ea is 175, key is test_row_0/A:col10/1733656901912/Put/seqid=0 2024-12-08T11:21:43,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742297_1473 (size=31255) 2024-12-08T11:21:43,852 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is 
flushed, sequenceid=387, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3c4b946b481e4af1aaab530cf4cde9ea 2024-12-08T11:21:43,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/416ea4c23ac74302bc7e1861fc109404 is 50, key is test_row_0/B:col10/1733656901912/Put/seqid=0 2024-12-08T11:21:43,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742298_1474 (size=12301) 2024-12-08T11:21:44,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/416ea4c23ac74302bc7e1861fc109404 2024-12-08T11:21:44,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/bbca4ac67400415c95e98b077dbf0c65 is 50, key is test_row_0/C:col10/1733656901912/Put/seqid=0 2024-12-08T11:21:44,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742299_1475 (size=12301) 2024-12-08T11:21:44,573 DEBUG [Thread-1662 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:63801 2024-12-08T11:21:44,573 DEBUG [Thread-1662 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:44,620 DEBUG [Thread-1660 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b82ba2a to 127.0.0.1:63801 2024-12-08T11:21:44,620 DEBUG [Thread-1660 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 96 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 20 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2177 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6531 rows 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2202 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6606 rows 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2193 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6577 rows 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2189 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6565 rows 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2187 2024-12-08T11:21:44,620 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6561 rows 2024-12-08T11:21:44,620 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T11:21:44,620 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68ad882f to 127.0.0.1:63801 2024-12-08T11:21:44,620 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:21:44,623 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-08T11:21:44,623 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T11:21:44,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:44,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T11:21:44,627 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656904627"}]},"ts":"1733656904627"} 2024-12-08T11:21:44,628 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T11:21:44,630 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T11:21:44,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T11:21:44,634 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, UNASSIGN}] 2024-12-08T11:21:44,634 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, UNASSIGN 2024-12-08T11:21:44,635 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=548fe72b592216ea9ca6f0d238246b28, regionState=CLOSING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:21:44,635 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T11:21:44,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure 548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:21:44,671 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/bbca4ac67400415c95e98b077dbf0c65 2024-12-08T11:21:44,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/3c4b946b481e4af1aaab530cf4cde9ea as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3c4b946b481e4af1aaab530cf4cde9ea 2024-12-08T11:21:44,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3c4b946b481e4af1aaab530cf4cde9ea, entries=150, sequenceid=387, filesize=30.5 K 2024-12-08T11:21:44,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/416ea4c23ac74302bc7e1861fc109404 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/416ea4c23ac74302bc7e1861fc109404 2024-12-08T11:21:44,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/416ea4c23ac74302bc7e1861fc109404, entries=150, sequenceid=387, filesize=12.0 K 2024-12-08T11:21:44,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/bbca4ac67400415c95e98b077dbf0c65 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/bbca4ac67400415c95e98b077dbf0c65 2024-12-08T11:21:44,684 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/bbca4ac67400415c95e98b077dbf0c65, entries=150, sequenceid=387, filesize=12.0 K 2024-12-08T11:21:44,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=13.42 KB/13740 for 548fe72b592216ea9ca6f0d238246b28 in 1650ms, sequenceid=387, compaction requested=true 2024-12-08T11:21:44,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:44,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:44,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:44,685 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:44,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:44,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:44,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 548fe72b592216ea9ca6f0d238246b28:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:44,685 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:44,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:44,685 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:44,685 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:44,685 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/B is initiating minor compaction (all files) 2024-12-08T11:21:44,685 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 548fe72b592216ea9ca6f0d238246b28/A is initiating minor compaction (all files) 2024-12-08T11:21:44,685 INFO 
[RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/A in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:44,685 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 548fe72b592216ea9ca6f0d238246b28/B in TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:44,685 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/29819a9bd04b445f8d61443da8907e19, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4ebc3b91abc4d919eccd0590725f3c4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3c4b946b481e4af1aaab530cf4cde9ea] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=92.3 K 2024-12-08T11:21:44,685 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/7fb409763f6c4245801ab51b1d611ef9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/cd85a8eaab16433dbed9b74983debae2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/416ea4c23ac74302bc7e1861fc109404] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp, totalSize=36.8 K 2024-12-08T11:21:44,685 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:44,686 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/29819a9bd04b445f8d61443da8907e19, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4ebc3b91abc4d919eccd0590725f3c4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3c4b946b481e4af1aaab530cf4cde9ea] 2024-12-08T11:21:44,686 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fb409763f6c4245801ab51b1d611ef9, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733656900711 2024-12-08T11:21:44,686 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29819a9bd04b445f8d61443da8907e19, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=344, earliestPutTs=1733656900711 2024-12-08T11:21:44,686 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting cd85a8eaab16433dbed9b74983debae2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733656900749 2024-12-08T11:21:44,686 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting d4ebc3b91abc4d919eccd0590725f3c4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1733656900749 2024-12-08T11:21:44,686 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 416ea4c23ac74302bc7e1861fc109404, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733656901912 2024-12-08T11:21:44,686 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c4b946b481e4af1aaab530cf4cde9ea, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733656901912 2024-12-08T11:21:44,693 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#B#compaction#399 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:44,693 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:44,693 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/a60a849e714d40a495232a2fcd1ae07b is 50, key is test_row_0/B:col10/1733656901912/Put/seqid=0 2024-12-08T11:21:44,696 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412086712e1fd76bb4fda96ffd174a0f2847d_548fe72b592216ea9ca6f0d238246b28 store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:44,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742300_1476 (size=13187) 2024-12-08T11:21:44,717 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412086712e1fd76bb4fda96ffd174a0f2847d_548fe72b592216ea9ca6f0d238246b28, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:44,718 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412086712e1fd76bb4fda96ffd174a0f2847d_548fe72b592216ea9ca6f0d238246b28 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:44,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742301_1477 (size=4469) 2024-12-08T11:21:44,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T11:21:44,787 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:44,787 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:44,787 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T11:21:44,788 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing 548fe72b592216ea9ca6f0d238246b28, disabling compactions & flushes 2024-12-08T11:21:44,788 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1942): waiting for 2 compactions to complete for region TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:44,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T11:21:45,105 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/a60a849e714d40a495232a2fcd1ae07b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/a60a849e714d40a495232a2fcd1ae07b 2024-12-08T11:21:45,109 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/B of 548fe72b592216ea9ca6f0d238246b28 into a60a849e714d40a495232a2fcd1ae07b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:45,109 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:45,109 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/B, priority=13, startTime=1733656904685; duration=0sec 2024-12-08T11:21:45,109 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:45,109 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:B 2024-12-08T11:21:45,109 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. because compaction request was cancelled 2024-12-08T11:21:45,109 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:C 2024-12-08T11:21:45,122 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 548fe72b592216ea9ca6f0d238246b28#A#compaction#400 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:45,123 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/61ff64da8fcf497a9a8d598e0e44b8e6 is 175, key is test_row_0/A:col10/1733656901912/Put/seqid=0 2024-12-08T11:21:45,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742302_1478 (size=32141) 2024-12-08T11:21:45,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T11:21:45,530 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/61ff64da8fcf497a9a8d598e0e44b8e6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/61ff64da8fcf497a9a8d598e0e44b8e6 2024-12-08T11:21:45,534 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 548fe72b592216ea9ca6f0d238246b28/A of 548fe72b592216ea9ca6f0d238246b28 into 61ff64da8fcf497a9a8d598e0e44b8e6(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:45,534 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:45,534 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28., storeName=548fe72b592216ea9ca6f0d238246b28/A, priority=13, startTime=1733656904685; duration=0sec 2024-12-08T11:21:45,534 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:45,535 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:45,535 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:45,535 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 548fe72b592216ea9ca6f0d238246b28:A 2024-12-08T11:21:45,535 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. after waiting 0 ms 2024-12-08T11:21:45,535 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 
2024-12-08T11:21:45,535 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(2837): Flushing 548fe72b592216ea9ca6f0d238246b28 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-12-08T11:21:45,535 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=A 2024-12-08T11:21:45,535 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:45,535 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=B 2024-12-08T11:21:45,535 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:45,535 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 548fe72b592216ea9ca6f0d238246b28, store=C 2024-12-08T11:21:45,535 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:45,540 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087b26752a938e4679b2c018482e88a0c0_548fe72b592216ea9ca6f0d238246b28 is 50, key is test_row_1/A:col10/1733656904572/Put/seqid=0 2024-12-08T11:21:45,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742303_1479 (size=9914) 2024-12-08T11:21:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T11:21:45,944 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:45,948 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412087b26752a938e4679b2c018482e88a0c0_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087b26752a938e4679b2c018482e88a0c0_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:45,949 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/280fd2cfb51640f88eaef3c057f8511b, store: [table=TestAcidGuarantees family=A region=548fe72b592216ea9ca6f0d238246b28] 2024-12-08T11:21:45,949 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/280fd2cfb51640f88eaef3c057f8511b is 175, key is test_row_1/A:col10/1733656904572/Put/seqid=0 2024-12-08T11:21:45,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742304_1480 (size=22561) 2024-12-08T11:21:45,953 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=394, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/280fd2cfb51640f88eaef3c057f8511b 2024-12-08T11:21:45,958 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/92d755f2cc974da1a48cecf5daf12f6e is 50, key is test_row_1/B:col10/1733656904572/Put/seqid=0 2024-12-08T11:21:45,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742305_1481 (size=9857) 2024-12-08T11:21:46,366 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/92d755f2cc974da1a48cecf5daf12f6e 2024-12-08T11:21:46,371 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/2af0ce57650f45ad9be6b02f90096aea is 50, key is test_row_1/C:col10/1733656904572/Put/seqid=0 2024-12-08T11:21:46,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742306_1482 (size=9857) 2024-12-08T11:21:46,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T11:21:46,775 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=394 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/2af0ce57650f45ad9be6b02f90096aea 2024-12-08T11:21:46,778 DEBUG 
[RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/A/280fd2cfb51640f88eaef3c057f8511b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/280fd2cfb51640f88eaef3c057f8511b 2024-12-08T11:21:46,781 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/280fd2cfb51640f88eaef3c057f8511b, entries=100, sequenceid=394, filesize=22.0 K 2024-12-08T11:21:46,782 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/B/92d755f2cc974da1a48cecf5daf12f6e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/92d755f2cc974da1a48cecf5daf12f6e 2024-12-08T11:21:46,784 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/92d755f2cc974da1a48cecf5daf12f6e, entries=100, sequenceid=394, filesize=9.6 K 2024-12-08T11:21:46,785 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/.tmp/C/2af0ce57650f45ad9be6b02f90096aea as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2af0ce57650f45ad9be6b02f90096aea 2024-12-08T11:21:46,787 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2af0ce57650f45ad9be6b02f90096aea, entries=100, sequenceid=394, filesize=9.6 K 2024-12-08T11:21:46,788 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for 548fe72b592216ea9ca6f0d238246b28 in 1253ms, sequenceid=394, compaction requested=true 2024-12-08T11:21:46,788 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/079b88cabe5446f29b1e18640e550808, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3cb5aabe19f843a28602a437dac28a8c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/21fb16a060544f8d86ca817bdd824dbc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/04636050436a43f8bf1f324da8204c31, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d0fb6fc9f39b440eb8246ce54d35e755, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c1ea9d858d0141ed80b52d779b6ecab0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2539acadc08744489f1c5b11cf9d083b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/641d6976d00c46b8b70e2e510ab02e9f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bc25eb08fac5456ba8663f746dcb1b17, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/e21b8513f05f45b584efbc17a1a46fdf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/34d8cc2acd9b406a851a61c59e0423d6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4820afed988453ba11f9cfb9f0f703b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c0706121c6ec4895b8b6c0af9900dedb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2830ed27edbf4dc483e08857e20dd2e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3732e55470574c1ea5661ee43c1c57c9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/819db1e0338d4724b086e1903958496a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2f3aba623c804263bf6d533d03a660bc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/b3ed9af63b6e46548c9f6d495f37b370, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/7413f258b8524153acd6bd8bdda916dd, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/8f8c6fc0bad54ca5931881b74958cf0b, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/6fab4af9c98f4076a15fadbb6ecb380e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bbabbf4a00af4b358361c030eaee97c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/82f3960a6bf24e9fb029831efffc7fc6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/29819a9bd04b445f8d61443da8907e19, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4ebc3b91abc4d919eccd0590725f3c4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3c4b946b481e4af1aaab530cf4cde9ea] to archive 2024-12-08T11:21:46,789 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:21:46,790 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/079b88cabe5446f29b1e18640e550808 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/079b88cabe5446f29b1e18640e550808 2024-12-08T11:21:46,791 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3cb5aabe19f843a28602a437dac28a8c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3cb5aabe19f843a28602a437dac28a8c 2024-12-08T11:21:46,792 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/21fb16a060544f8d86ca817bdd824dbc to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/21fb16a060544f8d86ca817bdd824dbc 2024-12-08T11:21:46,793 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/04636050436a43f8bf1f324da8204c31 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/04636050436a43f8bf1f324da8204c31 2024-12-08T11:21:46,793 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d0fb6fc9f39b440eb8246ce54d35e755 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d0fb6fc9f39b440eb8246ce54d35e755 2024-12-08T11:21:46,794 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c1ea9d858d0141ed80b52d779b6ecab0 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c1ea9d858d0141ed80b52d779b6ecab0 2024-12-08T11:21:46,795 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2539acadc08744489f1c5b11cf9d083b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2539acadc08744489f1c5b11cf9d083b 2024-12-08T11:21:46,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/641d6976d00c46b8b70e2e510ab02e9f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/641d6976d00c46b8b70e2e510ab02e9f 2024-12-08T11:21:46,796 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bc25eb08fac5456ba8663f746dcb1b17 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bc25eb08fac5456ba8663f746dcb1b17 2024-12-08T11:21:46,797 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/e21b8513f05f45b584efbc17a1a46fdf to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/e21b8513f05f45b584efbc17a1a46fdf 2024-12-08T11:21:46,798 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/34d8cc2acd9b406a851a61c59e0423d6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/34d8cc2acd9b406a851a61c59e0423d6 2024-12-08T11:21:46,799 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4820afed988453ba11f9cfb9f0f703b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4820afed988453ba11f9cfb9f0f703b 2024-12-08T11:21:46,800 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c0706121c6ec4895b8b6c0af9900dedb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/c0706121c6ec4895b8b6c0af9900dedb 2024-12-08T11:21:46,801 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2830ed27edbf4dc483e08857e20dd2e8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2830ed27edbf4dc483e08857e20dd2e8 2024-12-08T11:21:46,801 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3732e55470574c1ea5661ee43c1c57c9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3732e55470574c1ea5661ee43c1c57c9 2024-12-08T11:21:46,802 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/819db1e0338d4724b086e1903958496a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/819db1e0338d4724b086e1903958496a 2024-12-08T11:21:46,803 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2f3aba623c804263bf6d533d03a660bc to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/2f3aba623c804263bf6d533d03a660bc 2024-12-08T11:21:46,804 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/b3ed9af63b6e46548c9f6d495f37b370 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/b3ed9af63b6e46548c9f6d495f37b370 2024-12-08T11:21:46,805 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/7413f258b8524153acd6bd8bdda916dd to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/7413f258b8524153acd6bd8bdda916dd 2024-12-08T11:21:46,806 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/8f8c6fc0bad54ca5931881b74958cf0b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/8f8c6fc0bad54ca5931881b74958cf0b 2024-12-08T11:21:46,806 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/6fab4af9c98f4076a15fadbb6ecb380e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/6fab4af9c98f4076a15fadbb6ecb380e 2024-12-08T11:21:46,807 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bbabbf4a00af4b358361c030eaee97c8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/bbabbf4a00af4b358361c030eaee97c8 2024-12-08T11:21:46,808 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/82f3960a6bf24e9fb029831efffc7fc6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/82f3960a6bf24e9fb029831efffc7fc6 2024-12-08T11:21:46,809 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/29819a9bd04b445f8d61443da8907e19 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/29819a9bd04b445f8d61443da8907e19 2024-12-08T11:21:46,809 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4ebc3b91abc4d919eccd0590725f3c4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/d4ebc3b91abc4d919eccd0590725f3c4 2024-12-08T11:21:46,810 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3c4b946b481e4af1aaab530cf4cde9ea to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/3c4b946b481e4af1aaab530cf4cde9ea 2024-12-08T11:21:46,811 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/2237d0c3503249969f14aa8bcd783471, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/039670f712d043f4a090d189c801c263, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/16cc3244384d4854b0bbd7045be74d0d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/78aa135ceba24bc5b1b33ad198cb9251, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/15a306e65cc5433590fa8f5d7f1d7c32, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/9befc3c0d73e49ff9c84eee4c8fe9f75, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/a2a68e3d2f6e4aa5b59408743946047b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/31061eb786ed4af197fde66f589179a8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/828bfb8fec2a445c8e73b88852c62deb, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/00e3d89f519f49bd991f29459f098dcb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/30487f3a570040d9a4d932d0dd54467d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/1dbbc8e1c46249059bd69d55dd4ffae6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/ae1cff321a5048abab4e868871066409, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8ebe81c249a245c3babc4a742eceb332, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/753c26e452be4afe864f854446ab775b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/3fd6724fc03042c0af6fe66b6bb6d896, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8b70177a0429414ebe808db4cc699c32, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/402b4dc722d242e5bca1e32ca37847ee, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/fd55ca44153d47f4a5dc61680b6995d8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/c24c0db857bf4580a1e69e66f1e3ac12, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/aee09a3ba17844d5a51e20f3308fc273, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/83eb2d1a3ee74faeac4b17484cafbb91, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/7fb409763f6c4245801ab51b1d611ef9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/4e7dd9269da24324841cfb0bbe2a9954, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/cd85a8eaab16433dbed9b74983debae2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/416ea4c23ac74302bc7e1861fc109404] to archive 2024-12-08T11:21:46,812 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T11:21:46,813 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/2237d0c3503249969f14aa8bcd783471 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/2237d0c3503249969f14aa8bcd783471 2024-12-08T11:21:46,814 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/039670f712d043f4a090d189c801c263 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/039670f712d043f4a090d189c801c263 2024-12-08T11:21:46,815 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/16cc3244384d4854b0bbd7045be74d0d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/16cc3244384d4854b0bbd7045be74d0d 2024-12-08T11:21:46,816 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/78aa135ceba24bc5b1b33ad198cb9251 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/78aa135ceba24bc5b1b33ad198cb9251 2024-12-08T11:21:46,816 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/15a306e65cc5433590fa8f5d7f1d7c32 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/15a306e65cc5433590fa8f5d7f1d7c32 2024-12-08T11:21:46,817 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/9befc3c0d73e49ff9c84eee4c8fe9f75 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/9befc3c0d73e49ff9c84eee4c8fe9f75 2024-12-08T11:21:46,818 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/a2a68e3d2f6e4aa5b59408743946047b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/a2a68e3d2f6e4aa5b59408743946047b 2024-12-08T11:21:46,819 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/31061eb786ed4af197fde66f589179a8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/31061eb786ed4af197fde66f589179a8 2024-12-08T11:21:46,820 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/828bfb8fec2a445c8e73b88852c62deb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/828bfb8fec2a445c8e73b88852c62deb 2024-12-08T11:21:46,821 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/00e3d89f519f49bd991f29459f098dcb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/00e3d89f519f49bd991f29459f098dcb 2024-12-08T11:21:46,821 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/30487f3a570040d9a4d932d0dd54467d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/30487f3a570040d9a4d932d0dd54467d 2024-12-08T11:21:46,822 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/1dbbc8e1c46249059bd69d55dd4ffae6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/1dbbc8e1c46249059bd69d55dd4ffae6 2024-12-08T11:21:46,823 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/ae1cff321a5048abab4e868871066409 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/ae1cff321a5048abab4e868871066409 2024-12-08T11:21:46,824 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8ebe81c249a245c3babc4a742eceb332 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8ebe81c249a245c3babc4a742eceb332 2024-12-08T11:21:46,825 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/753c26e452be4afe864f854446ab775b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/753c26e452be4afe864f854446ab775b 2024-12-08T11:21:46,826 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/3fd6724fc03042c0af6fe66b6bb6d896 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/3fd6724fc03042c0af6fe66b6bb6d896 2024-12-08T11:21:46,827 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8b70177a0429414ebe808db4cc699c32 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/8b70177a0429414ebe808db4cc699c32 2024-12-08T11:21:46,828 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/402b4dc722d242e5bca1e32ca37847ee to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/402b4dc722d242e5bca1e32ca37847ee 2024-12-08T11:21:46,829 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/fd55ca44153d47f4a5dc61680b6995d8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/fd55ca44153d47f4a5dc61680b6995d8 2024-12-08T11:21:46,830 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/c24c0db857bf4580a1e69e66f1e3ac12 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/c24c0db857bf4580a1e69e66f1e3ac12 2024-12-08T11:21:46,831 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/aee09a3ba17844d5a51e20f3308fc273 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/aee09a3ba17844d5a51e20f3308fc273 2024-12-08T11:21:46,832 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/83eb2d1a3ee74faeac4b17484cafbb91 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/83eb2d1a3ee74faeac4b17484cafbb91 2024-12-08T11:21:46,833 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/7fb409763f6c4245801ab51b1d611ef9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/7fb409763f6c4245801ab51b1d611ef9 2024-12-08T11:21:46,833 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/4e7dd9269da24324841cfb0bbe2a9954 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/4e7dd9269da24324841cfb0bbe2a9954 2024-12-08T11:21:46,834 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/cd85a8eaab16433dbed9b74983debae2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/cd85a8eaab16433dbed9b74983debae2 2024-12-08T11:21:46,835 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/416ea4c23ac74302bc7e1861fc109404 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/416ea4c23ac74302bc7e1861fc109404 2024-12-08T11:21:46,836 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/d527115f083e4946b75eddb28594b022, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b4a385f573e2400bae7342dbf3168b05, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b8c2c7021edb40fca65e7c95815f5b54, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/90170f69731f487d9479d599035b660b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9ff7e107364c4f08b67fb6a1b6d27c34, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b0739feaf9f44b52baa29920eaec7891, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/38933341932c44a699c09f2c7efe64d9, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9bb9f86711424b9c901e1be77e05a3d8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/68986711a7c646888264ae7824690d75, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2306a92d11b048f3a811a5fb75f8b724, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e70b5ccbb4c24a1b99bac558917a7b58, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/948ee3dd8eb54c3eb5dd84fe377bf03f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/5a1a30aafbeb425fb182ab568c82f97e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/64a818e188544ed5b2787121aa7d762d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/4f06cafdfa764ef7b3dd65dc5e86c3cb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e8e6ce54ef0248ed9f2de57aca86a143, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/39f6ce9a4239472e8ffbb038118221ed, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/f18a94f7e541492ab08010002e763fe8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/0bc604d95cf24d59a2abd70b04e293bc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/ca436db2ca2d4b10b7fd51ccad23b300, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/72c137688843440eb28753098137fde6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b5e35381fee542fd9d6df33b476f1813, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/36908adfd3264385bad0a128df6c85c4] to archive 2024-12-08T11:21:46,836 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:21:46,837 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/d527115f083e4946b75eddb28594b022 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/d527115f083e4946b75eddb28594b022 2024-12-08T11:21:46,838 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b4a385f573e2400bae7342dbf3168b05 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b4a385f573e2400bae7342dbf3168b05 2024-12-08T11:21:46,839 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b8c2c7021edb40fca65e7c95815f5b54 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b8c2c7021edb40fca65e7c95815f5b54 2024-12-08T11:21:46,840 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/90170f69731f487d9479d599035b660b to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/90170f69731f487d9479d599035b660b 2024-12-08T11:21:46,840 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9ff7e107364c4f08b67fb6a1b6d27c34 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9ff7e107364c4f08b67fb6a1b6d27c34 2024-12-08T11:21:46,841 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b0739feaf9f44b52baa29920eaec7891 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b0739feaf9f44b52baa29920eaec7891 2024-12-08T11:21:46,842 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/38933341932c44a699c09f2c7efe64d9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/38933341932c44a699c09f2c7efe64d9 2024-12-08T11:21:46,843 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9bb9f86711424b9c901e1be77e05a3d8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/9bb9f86711424b9c901e1be77e05a3d8 2024-12-08T11:21:46,843 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/68986711a7c646888264ae7824690d75 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/68986711a7c646888264ae7824690d75 2024-12-08T11:21:46,844 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2306a92d11b048f3a811a5fb75f8b724 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2306a92d11b048f3a811a5fb75f8b724 2024-12-08T11:21:46,845 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e70b5ccbb4c24a1b99bac558917a7b58 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e70b5ccbb4c24a1b99bac558917a7b58 2024-12-08T11:21:46,846 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/948ee3dd8eb54c3eb5dd84fe377bf03f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/948ee3dd8eb54c3eb5dd84fe377bf03f 2024-12-08T11:21:46,846 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/5a1a30aafbeb425fb182ab568c82f97e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/5a1a30aafbeb425fb182ab568c82f97e 2024-12-08T11:21:46,847 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/64a818e188544ed5b2787121aa7d762d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/64a818e188544ed5b2787121aa7d762d 2024-12-08T11:21:46,848 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/4f06cafdfa764ef7b3dd65dc5e86c3cb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/4f06cafdfa764ef7b3dd65dc5e86c3cb 2024-12-08T11:21:46,849 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e8e6ce54ef0248ed9f2de57aca86a143 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e8e6ce54ef0248ed9f2de57aca86a143 2024-12-08T11:21:46,849 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/39f6ce9a4239472e8ffbb038118221ed to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/39f6ce9a4239472e8ffbb038118221ed 2024-12-08T11:21:46,850 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/f18a94f7e541492ab08010002e763fe8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/f18a94f7e541492ab08010002e763fe8 2024-12-08T11:21:46,851 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/0bc604d95cf24d59a2abd70b04e293bc to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/0bc604d95cf24d59a2abd70b04e293bc 2024-12-08T11:21:46,852 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/ca436db2ca2d4b10b7fd51ccad23b300 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/ca436db2ca2d4b10b7fd51ccad23b300 2024-12-08T11:21:46,853 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/72c137688843440eb28753098137fde6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/72c137688843440eb28753098137fde6 2024-12-08T11:21:46,854 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b5e35381fee542fd9d6df33b476f1813 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/b5e35381fee542fd9d6df33b476f1813 2024-12-08T11:21:46,854 DEBUG [StoreCloser-TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/36908adfd3264385bad0a128df6c85c4 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/36908adfd3264385bad0a128df6c85c4 2024-12-08T11:21:46,858 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/recovered.edits/397.seqid, newMaxSeqId=397, maxSeqId=4 2024-12-08T11:21:46,858 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28. 2024-12-08T11:21:46,858 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for 548fe72b592216ea9ca6f0d238246b28: 2024-12-08T11:21:46,860 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed 548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:46,860 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=548fe72b592216ea9ca6f0d238246b28, regionState=CLOSED 2024-12-08T11:21:46,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-08T11:21:46,862 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure 548fe72b592216ea9ca6f0d238246b28, server=355ef6e50110,46083,1733656795491 in 2.2250 sec 2024-12-08T11:21:46,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-08T11:21:46,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=548fe72b592216ea9ca6f0d238246b28, UNASSIGN in 2.2290 sec 2024-12-08T11:21:46,864 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-08T11:21:46,864 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.2330 sec 2024-12-08T11:21:46,865 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656906865"}]},"ts":"1733656906865"} 2024-12-08T11:21:46,866 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T11:21:46,868 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T11:21:46,869 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.2450 sec 2024-12-08T11:21:48,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-08T11:21:48,735 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-08T11:21:48,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T11:21:48,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:48,736 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-08T11:21:48,737 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:48,739 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,741 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/recovered.edits] 2024-12-08T11:21:48,743 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/280fd2cfb51640f88eaef3c057f8511b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/280fd2cfb51640f88eaef3c057f8511b 2024-12-08T11:21:48,744 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/61ff64da8fcf497a9a8d598e0e44b8e6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/A/61ff64da8fcf497a9a8d598e0e44b8e6 2024-12-08T11:21:48,746 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/92d755f2cc974da1a48cecf5daf12f6e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/92d755f2cc974da1a48cecf5daf12f6e 2024-12-08T11:21:48,747 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/a60a849e714d40a495232a2fcd1ae07b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/B/a60a849e714d40a495232a2fcd1ae07b 2024-12-08T11:21:48,749 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2af0ce57650f45ad9be6b02f90096aea to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/2af0ce57650f45ad9be6b02f90096aea 2024-12-08T11:21:48,750 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/95d99684e6b2441289c505a2bca9f395 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/95d99684e6b2441289c505a2bca9f395 2024-12-08T11:21:48,751 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/bbca4ac67400415c95e98b077dbf0c65 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/bbca4ac67400415c95e98b077dbf0c65 2024-12-08T11:21:48,751 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e24363e9023445fdabeed022f7afdec7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/C/e24363e9023445fdabeed022f7afdec7 2024-12-08T11:21:48,753 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/recovered.edits/397.seqid to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28/recovered.edits/397.seqid 2024-12-08T11:21:48,754 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,754 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T11:21:48,754 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T11:21:48,755 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-08T11:21:48,757 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208053a5bf78a0a4a419c5568b926e67795_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208053a5bf78a0a4a419c5568b926e67795_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,758 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081567626b1a7346008683f16f1e4f9f40_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412081567626b1a7346008683f16f1e4f9f40_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,759 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208287caa492f76436ba6d55f988af47926_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208287caa492f76436ba6d55f988af47926_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,760 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120840b63bf8c2744b0f8ea831fc56817bad_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120840b63bf8c2744b0f8ea831fc56817bad_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,761 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085529016f7aad4d16b771b7ad0cb5e738_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085529016f7aad4d16b771b7ad0cb5e738_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,762 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085c264f52ef814b82bc1dfb86b7173c70_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085c264f52ef814b82bc1dfb86b7173c70_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,763 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086224c657cdd34658a0b5bf1f4f7a9759_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412086224c657cdd34658a0b5bf1f4f7a9759_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,764 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208728c97baf872479691b69d3d3b6fe9aa_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208728c97baf872479691b69d3d3b6fe9aa_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,765 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087734aa11c4cd4fcb931f872edf55538e_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087734aa11c4cd4fcb931f872edf55538e_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,766 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087b26752a938e4679b2c018482e88a0c0_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412087b26752a938e4679b2c018482e88a0c0_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,767 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208878ab0da4cea418381159a32018f9321_548fe72b592216ea9ca6f0d238246b28 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208878ab0da4cea418381159a32018f9321_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,768 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a1e559740f69414bb905f8252159299d_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208a1e559740f69414bb905f8252159299d_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,769 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b5a43e448c714bd6903c47e4a859465b_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b5a43e448c714bd6903c47e4a859465b_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,770 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208bcd20de5430e4facbbe0d08d60305961_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208bcd20de5430e4facbbe0d08d60305961_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,771 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c8dacaed799a4fd6933d617ee8d591e0_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c8dacaed799a4fd6933d617ee8d591e0_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,772 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c9579b892b3041d5b44bb3932b018656_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c9579b892b3041d5b44bb3932b018656_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,773 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d2a98cee228e4c2898b0ff7262e53d0a_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d2a98cee228e4c2898b0ff7262e53d0a_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,774 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208efab91a4d8544cc1a8e0f10041ce3280_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208efab91a4d8544cc1a8e0f10041ce3280_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,775 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f646a1c98e404947bdd140bf566cabaa_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f646a1c98e404947bdd140bf566cabaa_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,776 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f8bbc039d3ea473387de29fdbd9b10bd_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f8bbc039d3ea473387de29fdbd9b10bd_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,777 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f9592043d91e4eeb8457b8376384ece4_548fe72b592216ea9ca6f0d238246b28 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208f9592043d91e4eeb8457b8376384ece4_548fe72b592216ea9ca6f0d238246b28 2024-12-08T11:21:48,778 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T11:21:48,779 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:48,781 WARN 
[PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T11:21:48,783 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-08T11:21:48,784 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:48,784 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T11:21:48,784 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733656908784"}]},"ts":"9223372036854775807"} 2024-12-08T11:21:48,786 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T11:21:48,786 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 548fe72b592216ea9ca6f0d238246b28, NAME => 'TestAcidGuarantees,,1733656879050.548fe72b592216ea9ca6f0d238246b28.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T11:21:48,786 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-12-08T11:21:48,786 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733656908786"}]},"ts":"9223372036854775807"} 2024-12-08T11:21:48,787 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T11:21:48,789 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:48,790 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 54 msec 2024-12-08T11:21:48,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-08T11:21:48,838 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-08T11:21:48,848 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=242 (was 240) - Thread LEAK? -, OpenFileDescriptor=456 (was 453) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=483 (was 498), ProcessCount=11 (was 11), AvailableMemoryMB=6477 (was 6739) 2024-12-08T11:21:48,857 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=242, OpenFileDescriptor=456, MaxFileDescriptor=1048576, SystemLoadAverage=483, ProcessCount=11, AvailableMemoryMB=6477 2024-12-08T11:21:48,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
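The entries above trace a client-driven DISABLE (procId 124) followed by a DELETE (procId 128) of TestAcidGuarantees, each reported complete once MasterRpcServices sees the procedure finish. A minimal client-side sketch of issuing the same pair of operations through the standard HBase Admin API (how a test harness would typically drive it, not a transcript of this test's own code):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        admin.disableTable(table); // blocks until the master's DisableTableProcedure completes (pid=124 above)
        admin.deleteTable(table);  // blocks until the DeleteTableProcedure completes (pid=128 above)
      }
    }
  }
}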
2024-12-08T11:21:48,858 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:21:48,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T11:21:48,859 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T11:21:48,860 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:48,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 129 2024-12-08T11:21:48,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-08T11:21:48,860 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T11:21:48,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742307_1483 (size=963) 2024-12-08T11:21:48,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-08T11:21:49,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-08T11:21:49,267 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c 2024-12-08T11:21:49,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742308_1484 (size=53) 2024-12-08T11:21:49,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-08T11:21:49,672 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:21:49,673 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing f908e109f52a6b3eb513553b88d88e13, disabling compactions & flushes 2024-12-08T11:21:49,673 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:49,673 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:49,673 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. after waiting 0 ms 2024-12-08T11:21:49,673 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:49,673 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
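Note: the descriptor logged for pid=129 sets 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' as table-level metadata, which is why every store opened later in this log uses a CompactingMemStore with the adaptive compactor. A hedged sketch of building an equivalent descriptor with the public client API follows; the table, family, and metadata names come from the log, and the helper class itself is hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAdaptiveTable {
  // Hypothetical helper: three families A/B/C with VERSIONS => 1 and ADAPTIVE
  // in-memory compaction enabled through table-level metadata, followed by a
  // blocking createTable call (the CreateTableProcedure in the log).
  static void create(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)
              .build());
    }
    admin.createTable(builder.build());
  }
}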
2024-12-08T11:21:49,673 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:49,674 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T11:21:49,674 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733656909674"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733656909674"}]},"ts":"1733656909674"} 2024-12-08T11:21:49,675 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T11:21:49,675 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T11:21:49,675 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656909675"}]},"ts":"1733656909675"} 2024-12-08T11:21:49,676 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T11:21:49,679 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f908e109f52a6b3eb513553b88d88e13, ASSIGN}] 2024-12-08T11:21:49,680 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f908e109f52a6b3eb513553b88d88e13, ASSIGN 2024-12-08T11:21:49,680 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=130, ppid=129, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=f908e109f52a6b3eb513553b88d88e13, ASSIGN; state=OFFLINE, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=false 2024-12-08T11:21:49,831 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=f908e109f52a6b3eb513553b88d88e13, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:21:49,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; OpenRegionProcedure f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:21:49,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-08T11:21:49,983 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:49,986 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
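Note: the repeated "Checking to see if procedure is done pid=129" entries are the client polling the master while the new region is assigned and opened. A client-side wait can be expressed with the Admin API roughly as below; this is a sketch under the assumption of a standard HBase 2.x connection, not the test harness's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class WaitForTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      // True only once every region of the table is open, i.e. after the
      // ASSIGN / OpenRegionProcedure steps recorded in the log above.
      while (!admin.isTableAvailable(tn)) {
        Thread.sleep(200);
      }
    }
  }
}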
2024-12-08T11:21:49,986 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(7285): Opening region: {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:21:49,987 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:49,987 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:21:49,987 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(7327): checking encryption for f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:49,987 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(7330): checking classloading for f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:49,988 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:49,989 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:21:49,989 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f908e109f52a6b3eb513553b88d88e13 columnFamilyName A 2024-12-08T11:21:49,989 DEBUG [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:49,990 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] regionserver.HStore(327): Store=f908e109f52a6b3eb513553b88d88e13/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:21:49,990 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:49,991 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:21:49,991 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f908e109f52a6b3eb513553b88d88e13 columnFamilyName B 2024-12-08T11:21:49,991 DEBUG [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:49,991 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] regionserver.HStore(327): Store=f908e109f52a6b3eb513553b88d88e13/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:21:49,991 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:49,992 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:21:49,992 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f908e109f52a6b3eb513553b88d88e13 columnFamilyName C 2024-12-08T11:21:49,992 DEBUG [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:21:49,992 INFO [StoreOpener-f908e109f52a6b3eb513553b88d88e13-1 {}] regionserver.HStore(327): Store=f908e109f52a6b3eb513553b88d88e13/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:21:49,993 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:49,993 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:49,993 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:49,994 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T11:21:49,995 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(1085): writing seq id for f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:49,997 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T11:21:49,997 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(1102): Opened f908e109f52a6b3eb513553b88d88e13; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68437553, jitterRate=0.01979900896549225}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:21:49,998 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegion(1001): Region open journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:49,998 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., pid=131, masterSystemTime=1733656909983 2024-12-08T11:21:50,000 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:50,000 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=131}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
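Note: with the region open, the test's writer threads start putting rows such as test_row_0 into families A, B and C, and the memstore blocking limit soon produces the RegionTooBusyException warnings further below. A purely illustrative write-and-retry sketch follows; the row and qualifier names mirror the log, the retry policy is hypothetical, and in practice the HBase client also retries this exception internally (it may surface wrapped in a retries-exhausted error).

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidWriter {
  // Hypothetical writer: one row across families A/B/C, retried with backoff
  // when the region reports it is over its memstore blocking limit.
  static void writeRow(Connection conn, int round) throws Exception {
    byte[] row = Bytes.toBytes("test_row_0");
    byte[] value = Bytes.toBytes("value-" + round);
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(row);
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      for (int attempt = 0; ; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Back off and retry; only to illustrate the idea, since the client
          // library performs its own retries before surfacing this.
          if (attempt >= 5) throw e;
          Thread.sleep(100L << attempt);
        }
      }
    }
  }
}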
2024-12-08T11:21:50,000 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=130 updating hbase:meta row=f908e109f52a6b3eb513553b88d88e13, regionState=OPEN, openSeqNum=2, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:21:50,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-08T11:21:50,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; OpenRegionProcedure f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 in 169 msec 2024-12-08T11:21:50,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-08T11:21:50,003 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f908e109f52a6b3eb513553b88d88e13, ASSIGN in 323 msec 2024-12-08T11:21:50,003 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T11:21:50,004 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656910003"}]},"ts":"1733656910003"} 2024-12-08T11:21:50,004 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T11:21:50,006 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=129, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T11:21:50,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1480 sec 2024-12-08T11:21:50,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-08T11:21:50,964 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 129 completed 2024-12-08T11:21:50,965 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x59bd764a to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@238db126 2024-12-08T11:21:50,968 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3512017b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:50,969 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:50,970 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38584, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:50,971 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T11:21:50,972 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49322, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T11:21:50,974 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x022a6e9f to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c60eb7d 2024-12-08T11:21:50,976 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c2253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:50,977 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-12-08T11:21:50,979 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:50,980 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-12-08T11:21:50,982 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:50,983 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-12-08T11:21:50,985 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:50,986 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-12-08T11:21:50,988 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:50,989 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ed69825 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34b30c39 2024-12-08T11:21:50,996 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7f20c4, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:50,997 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-12-08T11:21:51,000 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:51,001 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-12-08T11:21:51,004 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:51,005 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-12-08T11:21:51,008 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:51,008 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-12-08T11:21:51,014 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:21:51,020 DEBUG [hconnection-0x490f6197-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,021 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38588, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,022 DEBUG [hconnection-0x18d7120-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,023 DEBUG [hconnection-0x36419904-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,023 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38592, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,024 DEBUG 
[hconnection-0x6e882e12-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,024 DEBUG [hconnection-0xac75cba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,024 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38608, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,024 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38610, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,025 DEBUG [hconnection-0x7fb45d9e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,025 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38624, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,026 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,028 DEBUG [hconnection-0x28151bb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,029 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:51,029 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38640, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-08T11:21:51,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-08T11:21:51,031 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:51,032 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:51,032 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:51,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:51,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:21:51,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, 
store=A 2024-12-08T11:21:51,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:51,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:51,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:51,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:51,034 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:51,036 DEBUG [hconnection-0x59c35d6c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,036 DEBUG [hconnection-0x2a7e5b4d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,037 DEBUG [hconnection-0x55571ca7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:21:51,037 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38654, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,037 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,039 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38676, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:21:51,061 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/fec6062093bb4b5aa66d4a64862cf202 is 50, key is test_row_0/A:col10/1733656911032/Put/seqid=0 2024-12-08T11:21:51,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742309_1485 (size=12001) 2024-12-08T11:21:51,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/fec6062093bb4b5aa66d4a64862cf202 2024-12-08T11:21:51,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656971103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,108 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656971103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/61297bb403044fc48f0881ce3cabace3 is 50, key is test_row_0/B:col10/1733656911032/Put/seqid=0 2024-12-08T11:21:51,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742310_1486 (size=12001) 2024-12-08T11:21:51,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656971106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656971106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,128 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/61297bb403044fc48f0881ce3cabace3 2024-12-08T11:21:51,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656971107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-08T11:21:51,184 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:51,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:51,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/975db1079d1841ebb3afc88937412b08 is 50, key is test_row_0/C:col10/1733656911032/Put/seqid=0 2024-12-08T11:21:51,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742311_1487 (size=12001) 2024-12-08T11:21:51,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/975db1079d1841ebb3afc88937412b08 2024-12-08T11:21:51,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/fec6062093bb4b5aa66d4a64862cf202 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fec6062093bb4b5aa66d4a64862cf202 2024-12-08T11:21:51,209 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fec6062093bb4b5aa66d4a64862cf202, entries=150, sequenceid=14, filesize=11.7 K 2024-12-08T11:21:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/61297bb403044fc48f0881ce3cabace3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/61297bb403044fc48f0881ce3cabace3 2024-12-08T11:21:51,214 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/61297bb403044fc48f0881ce3cabace3, entries=150, sequenceid=14, filesize=11.7 K 2024-12-08T11:21:51,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656971209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656971210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/975db1079d1841ebb3afc88937412b08 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/975db1079d1841ebb3afc88937412b08 2024-12-08T11:21:51,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/975db1079d1841ebb3afc88937412b08, entries=150, sequenceid=14, filesize=11.7 K 2024-12-08T11:21:51,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for f908e109f52a6b3eb513553b88d88e13 in 188ms, sequenceid=14, compaction requested=false 2024-12-08T11:21:51,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:51,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:51,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T11:21:51,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:21:51,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:51,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:51,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:51,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:51,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:51,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due 
to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656971238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656971239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656971240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,247 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/4c8f63d48e9748eb94a30e0d5e7d32f1 is 50, key is test_row_0/A:col10/1733656911226/Put/seqid=0 2024-12-08T11:21:51,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742312_1488 (size=16681) 2024-12-08T11:21:51,281 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/4c8f63d48e9748eb94a30e0d5e7d32f1 2024-12-08T11:21:51,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/4c3ce83612404eee9ba2ae0a3b6b71c8 is 50, key is test_row_0/B:col10/1733656911226/Put/seqid=0 2024-12-08T11:21:51,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742313_1489 (size=12001) 2024-12-08T11:21:51,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-08T11:21:51,337 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,337 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:51,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
as already flushing 2024-12-08T11:21:51,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656971342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656971341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656971344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656971416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656971417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,490 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,490 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:51,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:51,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,491 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656971543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,548 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656971543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656971546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-08T11:21:51,643 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:51,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:51,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,707 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/4c3ce83612404eee9ba2ae0a3b6b71c8 2024-12-08T11:21:51,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656971718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656971719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/dc432f8b2aa94b77b62b096a9cff84d3 is 50, key is test_row_0/C:col10/1733656911226/Put/seqid=0 2024-12-08T11:21:51,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742314_1490 (size=12001) 2024-12-08T11:21:51,796 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:51,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:51,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656971849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656971851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:51,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656971854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,950 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:51,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:51,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:51,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:51,950 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:51,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,102 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:52,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:52,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:52,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:52,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:52,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-08T11:21:52,140 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/dc432f8b2aa94b77b62b096a9cff84d3 2024-12-08T11:21:52,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/4c8f63d48e9748eb94a30e0d5e7d32f1 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4c8f63d48e9748eb94a30e0d5e7d32f1 2024-12-08T11:21:52,147 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4c8f63d48e9748eb94a30e0d5e7d32f1, entries=250, sequenceid=39, filesize=16.3 K 2024-12-08T11:21:52,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/4c3ce83612404eee9ba2ae0a3b6b71c8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/4c3ce83612404eee9ba2ae0a3b6b71c8 2024-12-08T11:21:52,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/4c3ce83612404eee9ba2ae0a3b6b71c8, entries=150, sequenceid=39, filesize=11.7 K 2024-12-08T11:21:52,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/dc432f8b2aa94b77b62b096a9cff84d3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/dc432f8b2aa94b77b62b096a9cff84d3 2024-12-08T11:21:52,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/dc432f8b2aa94b77b62b096a9cff84d3, entries=150, sequenceid=39, filesize=11.7 K 2024-12-08T11:21:52,157 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f908e109f52a6b3eb513553b88d88e13 in 930ms, sequenceid=39, compaction requested=false 2024-12-08T11:21:52,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:52,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:52,230 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:21:52,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:21:52,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:52,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:52,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:52,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:52,231 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:52,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/fe19557c1ccb455090380e8f9c78d5a3 is 50, key is test_row_0/A:col10/1733656912229/Put/seqid=0 2024-12-08T11:21:52,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742315_1491 (size=14341) 2024-12-08T11:21:52,243 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/fe19557c1ccb455090380e8f9c78d5a3 2024-12-08T11:21:52,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/ca54eaeb0a6e49c9b21f75464656d95c is 50, key is test_row_0/B:col10/1733656912229/Put/seqid=0 2024-12-08T11:21:52,254 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:52,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:52,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:52,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:21:52,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742316_1492 (size=12001) 2024-12-08T11:21:52,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/ca54eaeb0a6e49c9b21f75464656d95c 2024-12-08T11:21:52,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0943dc9b09ed441db6650ddb5e33406c is 50, key is test_row_0/C:col10/1733656912229/Put/seqid=0 2024-12-08T11:21:52,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742317_1493 (size=12001) 2024-12-08T11:21:52,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:52,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656972300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,313 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:52,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656972306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656972356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,363 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:52,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656972356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:52,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656972357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,379 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T11:21:52,407 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:52,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:52,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:52,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:52,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656972407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:52,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656972414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,560 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:52,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:21:52,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:52,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:52,561 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:52,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:52,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:52,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656972614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:52,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656972623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0943dc9b09ed441db6650ddb5e33406c 2024-12-08T11:21:52,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/fe19557c1ccb455090380e8f9c78d5a3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fe19557c1ccb455090380e8f9c78d5a3 2024-12-08T11:21:52,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fe19557c1ccb455090380e8f9c78d5a3, entries=200, sequenceid=52, filesize=14.0 K 2024-12-08T11:21:52,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/ca54eaeb0a6e49c9b21f75464656d95c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ca54eaeb0a6e49c9b21f75464656d95c 2024-12-08T11:21:52,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ca54eaeb0a6e49c9b21f75464656d95c, entries=150, sequenceid=52, filesize=11.7 K 2024-12-08T11:21:52,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0943dc9b09ed441db6650ddb5e33406c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0943dc9b09ed441db6650ddb5e33406c 2024-12-08T11:21:52,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0943dc9b09ed441db6650ddb5e33406c, entries=150, sequenceid=52, filesize=11.7 K 2024-12-08T11:21:52,697 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for f908e109f52a6b3eb513553b88d88e13 in 467ms, sequenceid=52, compaction requested=true 2024-12-08T11:21:52,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:52,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:52,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:52,697 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:52,697 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:52,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:52,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:52,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:52,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:52,698 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:52,698 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/A is initiating minor compaction (all files) 2024-12-08T11:21:52,699 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:21:52,699 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fec6062093bb4b5aa66d4a64862cf202, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4c8f63d48e9748eb94a30e0d5e7d32f1, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fe19557c1ccb455090380e8f9c78d5a3] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=42.0 K 2024-12-08T11:21:52,704 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:52,704 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting fec6062093bb4b5aa66d4a64862cf202, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733656911029 2024-12-08T11:21:52,704 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:21:52,704 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:21:52,704 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/61297bb403044fc48f0881ce3cabace3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/4c3ce83612404eee9ba2ae0a3b6b71c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ca54eaeb0a6e49c9b21f75464656d95c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=35.2 K 2024-12-08T11:21:52,704 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c8f63d48e9748eb94a30e0d5e7d32f1, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733656911100 2024-12-08T11:21:52,709 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 61297bb403044fc48f0881ce3cabace3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733656911029 2024-12-08T11:21:52,709 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe19557c1ccb455090380e8f9c78d5a3, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733656911237 2024-12-08T11:21:52,709 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c3ce83612404eee9ba2ae0a3b6b71c8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733656911100 2024-12-08T11:21:52,710 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ca54eaeb0a6e49c9b21f75464656d95c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733656911237 2024-12-08T11:21:52,713 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:52,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-08T11:21:52,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:21:52,714 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T11:21:52,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:21:52,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:52,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:52,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:52,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:52,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:52,719 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#413 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:52,719 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/f9df69c880ee414aa120d21a53fafea0 is 50, key is test_row_0/A:col10/1733656912229/Put/seqid=0 2024-12-08T11:21:52,721 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:52,722 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2b54b04b577642ac92aac61cc8c7ba17 is 50, key is test_row_0/B:col10/1733656912229/Put/seqid=0 2024-12-08T11:21:52,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/639a4a454cba4ce6aa90d0d9447c9bc6 is 50, key is test_row_0/A:col10/1733656912304/Put/seqid=0 2024-12-08T11:21:52,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742319_1495 (size=12104) 2024-12-08T11:21:52,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742318_1494 (size=12104) 2024-12-08T11:21:52,777 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/f9df69c880ee414aa120d21a53fafea0 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f9df69c880ee414aa120d21a53fafea0 2024-12-08T11:21:52,782 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into f9df69c880ee414aa120d21a53fafea0(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:52,782 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:52,782 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=13, startTime=1733656912697; duration=0sec 2024-12-08T11:21:52,782 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:52,782 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:21:52,782 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:52,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742320_1496 (size=12001) 2024-12-08T11:21:52,783 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/639a4a454cba4ce6aa90d0d9447c9bc6 2024-12-08T11:21:52,784 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:52,784 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:21:52,784 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:21:52,784 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/975db1079d1841ebb3afc88937412b08, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/dc432f8b2aa94b77b62b096a9cff84d3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0943dc9b09ed441db6650ddb5e33406c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=35.2 K 2024-12-08T11:21:52,785 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 975db1079d1841ebb3afc88937412b08, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1733656911029 2024-12-08T11:21:52,788 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc432f8b2aa94b77b62b096a9cff84d3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1733656911100 2024-12-08T11:21:52,788 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0943dc9b09ed441db6650ddb5e33406c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733656911237 2024-12-08T11:21:52,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/76413427743e4805adc73b223a2464ef is 50, key is test_row_0/B:col10/1733656912304/Put/seqid=0 2024-12-08T11:21:52,798 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#C#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:52,799 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/d80294f34ac3455698336e12e511cb1a is 50, key is test_row_0/C:col10/1733656912229/Put/seqid=0 2024-12-08T11:21:52,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742321_1497 (size=12001) 2024-12-08T11:21:52,811 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/76413427743e4805adc73b223a2464ef 2024-12-08T11:21:52,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742322_1498 (size=12104) 2024-12-08T11:21:52,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/2f1a4b74266e4dfc8864e84c87acdd13 is 50, key is test_row_0/C:col10/1733656912304/Put/seqid=0 2024-12-08T11:21:52,833 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/d80294f34ac3455698336e12e511cb1a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d80294f34ac3455698336e12e511cb1a 2024-12-08T11:21:52,841 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into d80294f34ac3455698336e12e511cb1a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:52,842 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:52,842 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=13, startTime=1733656912698; duration=0sec 2024-12-08T11:21:52,842 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:52,842 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:21:52,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742323_1499 (size=12001) 2024-12-08T11:21:52,861 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/2f1a4b74266e4dfc8864e84c87acdd13 2024-12-08T11:21:52,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/639a4a454cba4ce6aa90d0d9447c9bc6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/639a4a454cba4ce6aa90d0d9447c9bc6 2024-12-08T11:21:52,873 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/639a4a454cba4ce6aa90d0d9447c9bc6, entries=150, sequenceid=75, filesize=11.7 K 2024-12-08T11:21:52,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/76413427743e4805adc73b223a2464ef as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/76413427743e4805adc73b223a2464ef 2024-12-08T11:21:52,884 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/76413427743e4805adc73b223a2464ef, entries=150, sequenceid=75, filesize=11.7 K 2024-12-08T11:21:52,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/2f1a4b74266e4dfc8864e84c87acdd13 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2f1a4b74266e4dfc8864e84c87acdd13 2024-12-08T11:21:52,894 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2f1a4b74266e4dfc8864e84c87acdd13, entries=150, sequenceid=75, filesize=11.7 K 2024-12-08T11:21:52,898 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for f908e109f52a6b3eb513553b88d88e13 in 184ms, sequenceid=75, compaction requested=false 2024-12-08T11:21:52,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:52,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:52,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-08T11:21:52,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-08T11:21:52,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-08T11:21:52,902 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8690 sec 2024-12-08T11:21:52,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.8740 sec 2024-12-08T11:21:52,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:52,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:21:52,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:21:52,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:52,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:52,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:52,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:52,938 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:52,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/72d589dee8c3408b95cc114382e8af47 is 50, key is test_row_1/A:col10/1733656912934/Put/seqid=0 2024-12-08T11:21:52,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742324_1500 (size=11997) 2024-12-08T11:21:53,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656972997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656972999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656973107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656973108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-08T11:21:53,135 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-08T11:21:53,136 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:53,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-08T11:21:53,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T11:21:53,138 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:53,139 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:53,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:53,153 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2b54b04b577642ac92aac61cc8c7ba17 as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2b54b04b577642ac92aac61cc8c7ba17 2024-12-08T11:21:53,156 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into 2b54b04b577642ac92aac61cc8c7ba17(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:53,156 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:53,157 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=13, startTime=1733656912697; duration=0sec 2024-12-08T11:21:53,157 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:53,157 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:21:53,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T11:21:53,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-08T11:21:53,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:53,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,291 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656973311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656973315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,347 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/72d589dee8c3408b95cc114382e8af47 2024-12-08T11:21:53,353 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/f80740c6a7b443f79666a6b29c71a0c8 is 50, key is test_row_1/B:col10/1733656912934/Put/seqid=0 2024-12-08T11:21:53,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742325_1501 (size=9657) 2024-12-08T11:21:53,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656973362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656973364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656973365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T11:21:53,443 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-08T11:21:53,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:53,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,444 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,596 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-08T11:21:53,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:53,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,597 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656973615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:53,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656973622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T11:21:53,749 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-08T11:21:53,749 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:53,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:21:53,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:53,758 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/f80740c6a7b443f79666a6b29c71a0c8 2024-12-08T11:21:53,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/03ebf2dbdb204756963598e429bc5870 is 50, key is test_row_1/C:col10/1733656912934/Put/seqid=0 2024-12-08T11:21:53,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742326_1502 (size=9657) 2024-12-08T11:21:53,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/03ebf2dbdb204756963598e429bc5870 2024-12-08T11:21:53,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/72d589dee8c3408b95cc114382e8af47 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/72d589dee8c3408b95cc114382e8af47 2024-12-08T11:21:53,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/72d589dee8c3408b95cc114382e8af47, entries=150, sequenceid=88, filesize=11.7 K 2024-12-08T11:21:53,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/f80740c6a7b443f79666a6b29c71a0c8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f80740c6a7b443f79666a6b29c71a0c8 2024-12-08T11:21:53,790 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f80740c6a7b443f79666a6b29c71a0c8, entries=100, sequenceid=88, filesize=9.4 K 2024-12-08T11:21:53,792 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/03ebf2dbdb204756963598e429bc5870 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/03ebf2dbdb204756963598e429bc5870 
2024-12-08T11:21:53,796 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/03ebf2dbdb204756963598e429bc5870, entries=100, sequenceid=88, filesize=9.4 K 2024-12-08T11:21:53,797 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f908e109f52a6b3eb513553b88d88e13 in 859ms, sequenceid=88, compaction requested=true 2024-12-08T11:21:53,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:53,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:53,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:53,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:53,797 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:53,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:53,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:53,797 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:53,797 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:53,798 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36102 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:53,798 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:53,798 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/A is initiating minor compaction (all files) 2024-12-08T11:21:53,798 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:21:53,798 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:21:53,798 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,798 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f9df69c880ee414aa120d21a53fafea0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/639a4a454cba4ce6aa90d0d9447c9bc6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/72d589dee8c3408b95cc114382e8af47] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=35.3 K 2024-12-08T11:21:53,798 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2b54b04b577642ac92aac61cc8c7ba17, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/76413427743e4805adc73b223a2464ef, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f80740c6a7b443f79666a6b29c71a0c8] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=33.0 K 2024-12-08T11:21:53,798 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9df69c880ee414aa120d21a53fafea0, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733656911237 2024-12-08T11:21:53,798 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b54b04b577642ac92aac61cc8c7ba17, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733656911237 2024-12-08T11:21:53,799 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 639a4a454cba4ce6aa90d0d9447c9bc6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733656912295 2024-12-08T11:21:53,799 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 76413427743e4805adc73b223a2464ef, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733656912295 2024-12-08T11:21:53,799 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f80740c6a7b443f79666a6b29c71a0c8, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733656912934 2024-12-08T11:21:53,799 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72d589dee8c3408b95cc114382e8af47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733656912933 
2024-12-08T11:21:53,811 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#422 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:53,812 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/488cea21c1aa464182854fb626060b0c is 50, key is test_row_0/B:col10/1733656912304/Put/seqid=0 2024-12-08T11:21:53,814 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#423 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:53,815 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/7258ee89f50744ba8331c3bd7b2bd341 is 50, key is test_row_0/A:col10/1733656912304/Put/seqid=0 2024-12-08T11:21:53,829 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T11:21:53,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742327_1503 (size=12207) 2024-12-08T11:21:53,849 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/488cea21c1aa464182854fb626060b0c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/488cea21c1aa464182854fb626060b0c 2024-12-08T11:21:53,853 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into 488cea21c1aa464182854fb626060b0c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:53,853 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:53,853 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=13, startTime=1733656913797; duration=0sec 2024-12-08T11:21:53,853 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:53,853 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:21:53,853 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:53,854 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:53,854 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:21:53,854 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,854 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d80294f34ac3455698336e12e511cb1a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2f1a4b74266e4dfc8864e84c87acdd13, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/03ebf2dbdb204756963598e429bc5870] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=33.0 K 2024-12-08T11:21:53,855 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting d80294f34ac3455698336e12e511cb1a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1733656911237 2024-12-08T11:21:53,855 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2f1a4b74266e4dfc8864e84c87acdd13, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1733656912295 2024-12-08T11:21:53,855 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 03ebf2dbdb204756963598e429bc5870, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733656912934 2024-12-08T11:21:53,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is 
added to blk_1073742328_1504 (size=12207) 2024-12-08T11:21:53,874 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#C#compaction#424 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:53,874 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/bd0ee3501e91412ba9138478d4c07bf4 is 50, key is test_row_0/C:col10/1733656912304/Put/seqid=0 2024-12-08T11:21:53,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742329_1505 (size=12207) 2024-12-08T11:21:53,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:53,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-08T11:21:53,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:53,902 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:21:53,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:21:53,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:53,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:53,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:53,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:53,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:53,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/29533b9a50b64539a655f87e92fbc0a2 is 50, key is test_row_0/A:col10/1733656912990/Put/seqid=0 2024-12-08T11:21:53,926 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742330_1506 (size=12001) 2024-12-08T11:21:54,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:54,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:54,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:54,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656974137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:54,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:54,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656974142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:54,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T11:21:54,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:54,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656974243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:54,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:54,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656974248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:54,271 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/7258ee89f50744ba8331c3bd7b2bd341 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/7258ee89f50744ba8331c3bd7b2bd341 2024-12-08T11:21:54,275 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into 7258ee89f50744ba8331c3bd7b2bd341(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:54,275 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:54,275 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=13, startTime=1733656913797; duration=0sec 2024-12-08T11:21:54,275 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:54,275 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:21:54,284 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/bd0ee3501e91412ba9138478d4c07bf4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bd0ee3501e91412ba9138478d4c07bf4 2024-12-08T11:21:54,289 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into bd0ee3501e91412ba9138478d4c07bf4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:54,289 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:54,289 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=13, startTime=1733656913797; duration=0sec 2024-12-08T11:21:54,289 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:54,289 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:21:54,327 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/29533b9a50b64539a655f87e92fbc0a2 2024-12-08T11:21:54,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/c97a161fc4e34778837b8aec21312dac is 50, key is test_row_0/B:col10/1733656912990/Put/seqid=0 2024-12-08T11:21:54,373 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742331_1507 (size=12001) 2024-12-08T11:21:54,374 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/c97a161fc4e34778837b8aec21312dac 2024-12-08T11:21:54,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4ec463e458c14c92841242005ad5817a is 50, key is test_row_0/C:col10/1733656912990/Put/seqid=0 2024-12-08T11:21:54,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742332_1508 (size=12001) 2024-12-08T11:21:54,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:54,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656974449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:54,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:54,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656974454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:54,752 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:54,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656974751, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:54,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:54,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656974759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:54,790 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4ec463e458c14c92841242005ad5817a 2024-12-08T11:21:54,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/29533b9a50b64539a655f87e92fbc0a2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/29533b9a50b64539a655f87e92fbc0a2 2024-12-08T11:21:54,807 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/29533b9a50b64539a655f87e92fbc0a2, entries=150, sequenceid=115, filesize=11.7 K 2024-12-08T11:21:54,808 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/c97a161fc4e34778837b8aec21312dac as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c97a161fc4e34778837b8aec21312dac 2024-12-08T11:21:54,812 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c97a161fc4e34778837b8aec21312dac, entries=150, sequenceid=115, filesize=11.7 K 2024-12-08T11:21:54,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4ec463e458c14c92841242005ad5817a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4ec463e458c14c92841242005ad5817a 2024-12-08T11:21:54,817 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4ec463e458c14c92841242005ad5817a, entries=150, sequenceid=115, filesize=11.7 K 2024-12-08T11:21:54,817 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f908e109f52a6b3eb513553b88d88e13 in 915ms, sequenceid=115, compaction requested=false 2024-12-08T11:21:54,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:54,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
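The RegionTooBusyException entries above are raised by HRegion.checkResources once the region's pending memstore data exceeds its blocking threshold (reported here as 512.0 K); writes are rejected until the in-flight flush, which finishes just above (~147.60 KB in 915ms), drains the memstore. The following is a minimal sketch of how that blocking threshold is conventionally derived from configuration. The two keys are standard HBase settings, but the small 128 KB flush size is an assumption made so the arithmetic matches the 512.0 K limit in the log; it is not read from this test's actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Per-region flush trigger (default 128 MB); assumed to be overridden
        // to roughly 128 KB in this test setup.
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

        // Multiplier above which puts are rejected with RegionTooBusyException (default 4).
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);

        // With a 128 KB flush size and the default multiplier of 4, this yields
        // the 512.0 K blocking limit reported in the log lines above.
        long blockingLimit = flushSize * multiplier;
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
    }
}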
2024-12-08T11:21:54,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-08T11:21:54,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-08T11:21:54,821 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-08T11:21:54,821 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6810 sec 2024-12-08T11:21:54,823 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.6860 sec 2024-12-08T11:21:55,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-08T11:21:55,242 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-08T11:21:55,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:55,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-08T11:21:55,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T11:21:55,245 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:55,246 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:55,246 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:55,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:55,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:21:55,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:21:55,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:55,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:55,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-12-08T11:21:55,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:55,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:55,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/38a165b22ac8444f960f77c44c78f2a2 is 50, key is test_row_0/A:col10/1733656914135/Put/seqid=0 2024-12-08T11:21:55,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742333_1509 (size=12047) 2024-12-08T11:21:55,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/38a165b22ac8444f960f77c44c78f2a2 2024-12-08T11:21:55,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/d1e93b0480b34ffb93a3f9b172316986 is 50, key is test_row_0/B:col10/1733656914135/Put/seqid=0 2024-12-08T11:21:55,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742334_1510 (size=9707) 2024-12-08T11:21:55,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656975315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656975315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T11:21:55,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656975369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,371 DEBUG [Thread-2148 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4132 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., hostname=355ef6e50110,46083,1733656795491, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:55,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656975372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656975372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,376 DEBUG [Thread-2144 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., hostname=355ef6e50110,46083,1733656795491, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:55,377 DEBUG [Thread-2146 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., hostname=355ef6e50110,46083,1733656795491, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:55,398 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-08T11:21:55,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:55,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
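The RpcRetryingCallerImpl entries above show the client side of the same pressure: the AcidGuaranteesTestTool writer threads call HTable.put, the server answers with RegionTooBusyException, and the client backs off and retries (tries=6 of retries=16 after roughly 4 seconds). Below is a minimal, self-contained sketch of that write path against the standard HBase client API; the explicit retry override is an assumption added only to mirror the retries=16 visible in the log, and the cell value is a placeholder.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry budget; the "tries=6, retries=16" lines above come from this policy.
        conf.setInt("hbase.client.retries.number", 16);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // RegionTooBusyException is retriable: the call blocks inside the client,
            // backing off between attempts, until the region's memstore drains.
            table.put(put);
        }
    }
}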
2024-12-08T11:21:55,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:55,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:55,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656975417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656975421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T11:21:55,551 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-08T11:21:55,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:55,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
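The pid=137 failures just above are the table-flush procedure colliding with the memstore flush already running on the region server ("NOT flushing ... as already flushing"), so the master records the remote procedure as failed and redispatches it. For reference, a minimal sketch of how such a flush is driven from the client, matching the "Client=jenkins ... flush TestAcidGuarantees" request earlier in the log; the snippet is illustrative only and assumes a reachable cluster on the classpath configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush the table; the master runs a FlushTableProcedure
            // that fans out one FlushRegionProcedure per region, and the client polls
            // for completion, which is the "Checking to see if procedure is done pid=..."
            // traffic seen throughout this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}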
2024-12-08T11:21:55,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:55,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:55,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656975620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656975625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/d1e93b0480b34ffb93a3f9b172316986 2024-12-08T11:21:55,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a135647b70d0483e8716b11202ccb797 is 50, key is test_row_0/C:col10/1733656914135/Put/seqid=0 2024-12-08T11:21:55,706 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-08T11:21:55,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:55,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:55,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:55,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742335_1511 (size=9707) 2024-12-08T11:21:55,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:55,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a135647b70d0483e8716b11202ccb797 2024-12-08T11:21:55,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/38a165b22ac8444f960f77c44c78f2a2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/38a165b22ac8444f960f77c44c78f2a2 2024-12-08T11:21:55,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/38a165b22ac8444f960f77c44c78f2a2, entries=150, sequenceid=129, filesize=11.8 K 2024-12-08T11:21:55,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/d1e93b0480b34ffb93a3f9b172316986 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/d1e93b0480b34ffb93a3f9b172316986 2024-12-08T11:21:55,723 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/d1e93b0480b34ffb93a3f9b172316986, entries=100, 
sequenceid=129, filesize=9.5 K 2024-12-08T11:21:55,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a135647b70d0483e8716b11202ccb797 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a135647b70d0483e8716b11202ccb797 2024-12-08T11:21:55,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a135647b70d0483e8716b11202ccb797, entries=100, sequenceid=129, filesize=9.5 K 2024-12-08T11:21:55,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f908e109f52a6b3eb513553b88d88e13 in 475ms, sequenceid=129, compaction requested=true 2024-12-08T11:21:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:21:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:21:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:21:55,732 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:55,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T11:21:55,733 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:55,736 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36255 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:55,736 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33915 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:55,736 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): 
f908e109f52a6b3eb513553b88d88e13/A is initiating minor compaction (all files) 2024-12-08T11:21:55,736 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:21:55,736 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,736 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,736 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/488cea21c1aa464182854fb626060b0c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c97a161fc4e34778837b8aec21312dac, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/d1e93b0480b34ffb93a3f9b172316986] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=33.1 K 2024-12-08T11:21:55,736 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/7258ee89f50744ba8331c3bd7b2bd341, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/29533b9a50b64539a655f87e92fbc0a2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/38a165b22ac8444f960f77c44c78f2a2] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=35.4 K 2024-12-08T11:21:55,736 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 488cea21c1aa464182854fb626060b0c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733656912304 2024-12-08T11:21:55,737 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7258ee89f50744ba8331c3bd7b2bd341, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733656912304 2024-12-08T11:21:55,738 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c97a161fc4e34778837b8aec21312dac, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733656912990 2024-12-08T11:21:55,738 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29533b9a50b64539a655f87e92fbc0a2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733656912990 2024-12-08T11:21:55,738 DEBUG 
[RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting d1e93b0480b34ffb93a3f9b172316986, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733656914135 2024-12-08T11:21:55,738 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 38a165b22ac8444f960f77c44c78f2a2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733656914135 2024-12-08T11:21:55,744 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#431 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:55,744 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:55,745 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/d9e35e06e25842ef86c836360d49504a is 50, key is test_row_0/A:col10/1733656914135/Put/seqid=0 2024-12-08T11:21:55,745 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/db4221266bea412b96e3d3a09cb32b46 is 50, key is test_row_0/B:col10/1733656914135/Put/seqid=0 2024-12-08T11:21:55,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742337_1513 (size=12359) 2024-12-08T11:21:55,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742336_1512 (size=12359) 2024-12-08T11:21:55,764 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/d9e35e06e25842ef86c836360d49504a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d9e35e06e25842ef86c836360d49504a 2024-12-08T11:21:55,769 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into d9e35e06e25842ef86c836360d49504a(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:55,769 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:55,769 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=13, startTime=1733656915732; duration=0sec 2024-12-08T11:21:55,769 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:21:55,769 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:21:55,769 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:21:55,770 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33915 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:21:55,770 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:21:55,770 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,770 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bd0ee3501e91412ba9138478d4c07bf4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4ec463e458c14c92841242005ad5817a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a135647b70d0483e8716b11202ccb797] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=33.1 K 2024-12-08T11:21:55,771 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd0ee3501e91412ba9138478d4c07bf4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1733656912304 2024-12-08T11:21:55,771 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ec463e458c14c92841242005ad5817a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1733656912990 2024-12-08T11:21:55,771 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting a135647b70d0483e8716b11202ccb797, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733656914135 2024-12-08T11:21:55,778 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#C#compaction#433 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:21:55,779 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a3e276c2e340419c938c1d80b7350588 is 50, key is test_row_0/C:col10/1733656914135/Put/seqid=0 2024-12-08T11:21:55,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742338_1514 (size=12359) 2024-12-08T11:21:55,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T11:21:55,860 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,860 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-08T11:21:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:55,861 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T11:21:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:21:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:55,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:55,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:55,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/cb93435b5ee640b498490412a6c01b3e is 50, key is 
test_row_0/A:col10/1733656915313/Put/seqid=0 2024-12-08T11:21:55,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742339_1515 (size=12151) 2024-12-08T11:21:55,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:55,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:55,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656975957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:55,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:55,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656975960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:56,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:56,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656976062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:56,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:56,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656976065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:56,172 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/db4221266bea412b96e3d3a09cb32b46 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/db4221266bea412b96e3d3a09cb32b46 2024-12-08T11:21:56,179 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into db4221266bea412b96e3d3a09cb32b46(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:21:56,179 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:56,179 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=13, startTime=1733656915732; duration=0sec 2024-12-08T11:21:56,179 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:56,179 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:21:56,187 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a3e276c2e340419c938c1d80b7350588 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a3e276c2e340419c938c1d80b7350588 2024-12-08T11:21:56,191 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into a3e276c2e340419c938c1d80b7350588(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:21:56,191 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:56,192 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=13, startTime=1733656915732; duration=0sec 2024-12-08T11:21:56,192 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:21:56,192 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:21:56,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:56,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656976267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:56,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:56,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656976269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:56,303 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/cb93435b5ee640b498490412a6c01b3e 2024-12-08T11:21:56,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2499288ecc0d45f69fedccdbf030136f is 50, key is test_row_0/B:col10/1733656915313/Put/seqid=0 2024-12-08T11:21:56,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742340_1516 (size=12151) 2024-12-08T11:21:56,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T11:21:56,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:56,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656976572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:56,577 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:56,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656976572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:56,713 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2499288ecc0d45f69fedccdbf030136f 2024-12-08T11:21:56,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/44acfde678744b6a86aa48d9559fa402 is 50, key is test_row_0/C:col10/1733656915313/Put/seqid=0 2024-12-08T11:21:56,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742341_1517 (size=12151) 2024-12-08T11:21:57,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:57,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656977078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:57,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:57,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656977080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:57,127 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/44acfde678744b6a86aa48d9559fa402 2024-12-08T11:21:57,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/cb93435b5ee640b498490412a6c01b3e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cb93435b5ee640b498490412a6c01b3e 2024-12-08T11:21:57,136 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cb93435b5ee640b498490412a6c01b3e, entries=150, sequenceid=154, filesize=11.9 K 2024-12-08T11:21:57,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2499288ecc0d45f69fedccdbf030136f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2499288ecc0d45f69fedccdbf030136f 2024-12-08T11:21:57,142 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2499288ecc0d45f69fedccdbf030136f, entries=150, sequenceid=154, filesize=11.9 K 2024-12-08T11:21:57,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/44acfde678744b6a86aa48d9559fa402 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/44acfde678744b6a86aa48d9559fa402 2024-12-08T11:21:57,148 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/44acfde678744b6a86aa48d9559fa402, entries=150, sequenceid=154, filesize=11.9 K 2024-12-08T11:21:57,149 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for f908e109f52a6b3eb513553b88d88e13 in 1288ms, sequenceid=154, compaction requested=false 2024-12-08T11:21:57,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:57,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:57,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-08T11:21:57,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-08T11:21:57,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-08T11:21:57,151 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9040 sec 2024-12-08T11:21:57,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.9080 sec 2024-12-08T11:21:57,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-08T11:21:57,349 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-08T11:21:57,350 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:57,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-08T11:21:57,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T11:21:57,352 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:57,354 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:57,354 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:57,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T11:21:57,506 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:57,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-08T11:21:57,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:57,506 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:21:57,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:21:57,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:57,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:57,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:57,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:57,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:57,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/aab5f2e56bb04d3aa4ee519ecf95adab is 50, key is test_row_0/A:col10/1733656915959/Put/seqid=0 2024-12-08T11:21:57,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42827 is added to blk_1073742342_1518 (size=12151) 2024-12-08T11:21:57,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T11:21:57,794 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fec6062093bb4b5aa66d4a64862cf202, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4c8f63d48e9748eb94a30e0d5e7d32f1, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fe19557c1ccb455090380e8f9c78d5a3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f9df69c880ee414aa120d21a53fafea0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/639a4a454cba4ce6aa90d0d9447c9bc6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/7258ee89f50744ba8331c3bd7b2bd341, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/72d589dee8c3408b95cc114382e8af47, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/29533b9a50b64539a655f87e92fbc0a2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/38a165b22ac8444f960f77c44c78f2a2] to archive 2024-12-08T11:21:57,795 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T11:21:57,797 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fec6062093bb4b5aa66d4a64862cf202 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fec6062093bb4b5aa66d4a64862cf202 2024-12-08T11:21:57,798 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4c8f63d48e9748eb94a30e0d5e7d32f1 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4c8f63d48e9748eb94a30e0d5e7d32f1 2024-12-08T11:21:57,799 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fe19557c1ccb455090380e8f9c78d5a3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/fe19557c1ccb455090380e8f9c78d5a3 2024-12-08T11:21:57,800 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f9df69c880ee414aa120d21a53fafea0 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f9df69c880ee414aa120d21a53fafea0 2024-12-08T11:21:57,801 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/639a4a454cba4ce6aa90d0d9447c9bc6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/639a4a454cba4ce6aa90d0d9447c9bc6 2024-12-08T11:21:57,803 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/7258ee89f50744ba8331c3bd7b2bd341 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/7258ee89f50744ba8331c3bd7b2bd341 2024-12-08T11:21:57,804 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/72d589dee8c3408b95cc114382e8af47 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/72d589dee8c3408b95cc114382e8af47 2024-12-08T11:21:57,805 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/29533b9a50b64539a655f87e92fbc0a2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/29533b9a50b64539a655f87e92fbc0a2 2024-12-08T11:21:57,806 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/38a165b22ac8444f960f77c44c78f2a2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/38a165b22ac8444f960f77c44c78f2a2 2024-12-08T11:21:57,807 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/61297bb403044fc48f0881ce3cabace3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/4c3ce83612404eee9ba2ae0a3b6b71c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2b54b04b577642ac92aac61cc8c7ba17, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ca54eaeb0a6e49c9b21f75464656d95c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/76413427743e4805adc73b223a2464ef, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/488cea21c1aa464182854fb626060b0c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f80740c6a7b443f79666a6b29c71a0c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c97a161fc4e34778837b8aec21312dac, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/d1e93b0480b34ffb93a3f9b172316986] to archive 2024-12-08T11:21:57,808 DEBUG 
[RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:21:57,809 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/61297bb403044fc48f0881ce3cabace3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/61297bb403044fc48f0881ce3cabace3 2024-12-08T11:21:57,810 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/4c3ce83612404eee9ba2ae0a3b6b71c8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/4c3ce83612404eee9ba2ae0a3b6b71c8 2024-12-08T11:21:57,811 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2b54b04b577642ac92aac61cc8c7ba17 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2b54b04b577642ac92aac61cc8c7ba17 2024-12-08T11:21:57,812 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ca54eaeb0a6e49c9b21f75464656d95c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ca54eaeb0a6e49c9b21f75464656d95c 2024-12-08T11:21:57,813 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/76413427743e4805adc73b223a2464ef to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/76413427743e4805adc73b223a2464ef 2024-12-08T11:21:57,814 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/488cea21c1aa464182854fb626060b0c to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/488cea21c1aa464182854fb626060b0c 2024-12-08T11:21:57,815 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f80740c6a7b443f79666a6b29c71a0c8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f80740c6a7b443f79666a6b29c71a0c8 2024-12-08T11:21:57,817 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c97a161fc4e34778837b8aec21312dac to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c97a161fc4e34778837b8aec21312dac 2024-12-08T11:21:57,818 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/d1e93b0480b34ffb93a3f9b172316986 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/d1e93b0480b34ffb93a3f9b172316986 2024-12-08T11:21:57,820 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/975db1079d1841ebb3afc88937412b08, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/dc432f8b2aa94b77b62b096a9cff84d3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d80294f34ac3455698336e12e511cb1a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0943dc9b09ed441db6650ddb5e33406c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2f1a4b74266e4dfc8864e84c87acdd13, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bd0ee3501e91412ba9138478d4c07bf4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/03ebf2dbdb204756963598e429bc5870, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4ec463e458c14c92841242005ad5817a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a135647b70d0483e8716b11202ccb797] to archive 2024-12-08T11:21:57,820 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:21:57,822 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/975db1079d1841ebb3afc88937412b08 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/975db1079d1841ebb3afc88937412b08 2024-12-08T11:21:57,823 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/dc432f8b2aa94b77b62b096a9cff84d3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/dc432f8b2aa94b77b62b096a9cff84d3 2024-12-08T11:21:57,824 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d80294f34ac3455698336e12e511cb1a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d80294f34ac3455698336e12e511cb1a 2024-12-08T11:21:57,825 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0943dc9b09ed441db6650ddb5e33406c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0943dc9b09ed441db6650ddb5e33406c 2024-12-08T11:21:57,826 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2f1a4b74266e4dfc8864e84c87acdd13 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2f1a4b74266e4dfc8864e84c87acdd13 2024-12-08T11:21:57,827 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bd0ee3501e91412ba9138478d4c07bf4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bd0ee3501e91412ba9138478d4c07bf4 2024-12-08T11:21:57,831 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/03ebf2dbdb204756963598e429bc5870 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/03ebf2dbdb204756963598e429bc5870 2024-12-08T11:21:57,836 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4ec463e458c14c92841242005ad5817a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4ec463e458c14c92841242005ad5817a 2024-12-08T11:21:57,837 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/355ef6e50110:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a135647b70d0483e8716b11202ccb797 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a135647b70d0483e8716b11202ccb797 2024-12-08T11:21:57,917 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/aab5f2e56bb04d3aa4ee519ecf95adab 2024-12-08T11:21:57,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/8c681efc56df4db790b74175e21e05e0 is 50, key is test_row_0/B:col10/1733656915959/Put/seqid=0 2024-12-08T11:21:57,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742343_1519 (size=12151) 2024-12-08T11:21:57,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T11:21:58,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on 
f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:58,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:58,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:58,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656978149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:58,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:58,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656978150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:58,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:58,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656978256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:58,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:58,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656978256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:58,350 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/8c681efc56df4db790b74175e21e05e0 2024-12-08T11:21:58,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/f4d3347620dc46eb94b4d694ed9b4d33 is 50, key is test_row_0/C:col10/1733656915959/Put/seqid=0 2024-12-08T11:21:58,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742344_1520 (size=12151) 2024-12-08T11:21:58,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T11:21:58,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:58,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656978460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:58,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:58,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656978461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:58,766 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=168 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/f4d3347620dc46eb94b4d694ed9b4d33 2024-12-08T11:21:58,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:58,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656978765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:58,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:58,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656978765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:58,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/aab5f2e56bb04d3aa4ee519ecf95adab as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/aab5f2e56bb04d3aa4ee519ecf95adab 2024-12-08T11:21:58,774 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/aab5f2e56bb04d3aa4ee519ecf95adab, entries=150, sequenceid=168, filesize=11.9 K 2024-12-08T11:21:58,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/8c681efc56df4db790b74175e21e05e0 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8c681efc56df4db790b74175e21e05e0 2024-12-08T11:21:58,788 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8c681efc56df4db790b74175e21e05e0, entries=150, sequenceid=168, filesize=11.9 K 2024-12-08T11:21:58,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/f4d3347620dc46eb94b4d694ed9b4d33 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4d3347620dc46eb94b4d694ed9b4d33 2024-12-08T11:21:58,792 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): 
Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4d3347620dc46eb94b4d694ed9b4d33, entries=150, sequenceid=168, filesize=11.9 K 2024-12-08T11:21:58,793 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f908e109f52a6b3eb513553b88d88e13 in 1287ms, sequenceid=168, compaction requested=true 2024-12-08T11:21:58,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:21:58,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:58,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-08T11:21:58,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-08T11:21:58,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-08T11:21:58,796 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4410 sec 2024-12-08T11:21:58,798 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.4470 sec 2024-12-08T11:21:59,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:21:59,298 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T11:21:59,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:21:59,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:59,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:21:59,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:59,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:21:59,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:21:59,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/af80974ab0a6477d81ca8d685c3e397f is 50, key is test_row_0/A:col10/1733656918134/Put/seqid=0 
2024-12-08T11:21:59,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742345_1521 (size=16931) 2024-12-08T11:21:59,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656979321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,330 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656979322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656979380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,386 DEBUG [Thread-2146 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., hostname=355ef6e50110,46083,1733656795491, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:59,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656979396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,402 DEBUG [Thread-2148 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8164 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., hostname=355ef6e50110,46083,1733656795491, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:59,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656979409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,415 DEBUG [Thread-2144 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8175 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., hostname=355ef6e50110,46083,1733656795491, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:21:59,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656979430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656979431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-08T11:21:59,456 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-08T11:21:59,457 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:21:59,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-12-08T11:21:59,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T11:21:59,458 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:21:59,459 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:21:59,459 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:21:59,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T11:21:59,610 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,611 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T11:21:59,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:21:59,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:59,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:59,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:59,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
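The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from HRegion.checkResources: a region starts rejecting writes once its memstore reaches the flush size multiplied by the blocking multiplier, and the client's RpcRetryingCallerImpl keeps retrying the put in the meantime (the "tries=7, retries=16" lines). A hedged configuration sketch of the knobs involved; the values below are illustrative and are not the settings this test run used:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreTuningSketch {
    // Illustrative values only; per the log, this test's effective blocking limit was 512 K.
    public static Configuration tunedConf() {
        Configuration conf = HBaseConfiguration.create();
        // Server side: a region blocks updates once its memstore reaches
        // hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Client side: how many times and how quickly the put is retried before
        // the RegionTooBusyException is handed back to the caller.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);
        return conf;
    }
}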
2024-12-08T11:21:59,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:59,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656979636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656979636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,706 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/af80974ab0a6477d81ca8d685c3e397f 2024-12-08T11:21:59,716 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/a1851ab42f4047da8891905ca28cfba4 is 50, key is test_row_0/B:col10/1733656918134/Put/seqid=0 2024-12-08T11:21:59,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742346_1522 (size=12151) 2024-12-08T11:21:59,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T11:21:59,763 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T11:21:59,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:59,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:59,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
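The AtomicityWriter threads above (Thread-2144/2146/2148) hit these exceptions from HTable.put while the region is blocked for flushing. Below is a hypothetical standalone writer, not the AcidGuaranteesTestTool code, showing one way to layer an application-level backoff on top of the client's built-in retries; table, row, and column names mirror the log but the values are assumptions:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            // Table.put() already retries internally (the tries=7, retries=16 lines above);
            // once those retries are exhausted the failure typically surfaces as a
            // RetriesExhaustedException wrapping the RegionTooBusyException, and this
            // outer loop only adds a coarse application-level backoff before trying again.
            for (int attempt = 0; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (IOException e) {
                    if (attempt >= 5) {
                        throw e;
                    }
                    Thread.sleep(200L << attempt);   // simple exponential backoff
                }
            }
        }
    }
}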
2024-12-08T11:21:59,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:59,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:59,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:59,916 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,917 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T11:21:59,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:59,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:21:59,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:21:59,917 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:59,917 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:21:59,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656979939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:21:59,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:21:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656979942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:00,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T11:22:00,069 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:00,070 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T11:22:00,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:00,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,070 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
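Note: the RegionTooBusyException warnings above show the region server rejecting Mutate calls while the memstore of f908e109f52a6b3eb513553b88d88e13 sits above the 512 K blocking limit reported in the log; the writer is expected to back off until the in-flight flush drains the memstore. The standard HBase client already retries this internally, but the idea can be sketched explicitly. A minimal sketch against the public client API, assuming the TestAcidGuarantees table; the value, retry count, and backoff are made up and this is not the test's actual writer:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family "A" and qualifier "col10" mirror the keys seen in the log; the value is invented.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100; // arbitrary starting backoff
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);            // rejected with RegionTooBusyException while the memstore is over its limit
          return;                    // write accepted
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);   // give the flush time to drain the memstore
          backoffMs *= 2;            // simple exponential backoff
        }
      }
      throw new java.io.IOException("region still too busy after retries");
    }
  }
}
```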
2024-12-08T11:22:00,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:00,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:00,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/a1851ab42f4047da8891905ca28cfba4 2024-12-08T11:22:00,133 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/7de9919f84454856b18276baa3b136c7 is 50, key is test_row_0/C:col10/1733656918134/Put/seqid=0 2024-12-08T11:22:00,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742347_1523 (size=12151) 2024-12-08T11:22:00,222 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:00,222 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T11:22:00,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:00,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,223 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
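Note: the cycle above — the master dispatches FlushRegionCallable for pid=141, the region answers "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master re-dispatches it a little later — is how a master-driven flush procedure waits out a flush that MemStoreFlusher already started on its own. From client code that whole dance sits behind the Admin API; a minimal sketch of requesting such a flush, assuming a standard connection:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; the master then drives
      // a flush procedure per region server and keeps retrying until it completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```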
2024-12-08T11:22:00,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:00,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:00,374 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:00,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T11:22:00,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:00,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,375 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:00,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:00,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:00,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656980447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:00,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:00,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656980451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:00,526 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:00,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T11:22:00,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:00,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
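Note: the "Over memstore limit=512.0 K" rejections come from HRegion.checkResources, which blocks updates once a region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; a 512 K limit suggests this test run uses a much smaller flush size than the 128 MB default. A sketch of the two knobs involved; the values shown are the usual defaults, not necessarily this test's configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a memstore is flushed to disk (default 128 MB; illustrative value here).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Updates are blocked (RegionTooBusyException) once the memstore reaches
    // flush.size * block.multiplier (default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block above " + blockingLimit + " bytes per region");
  }
}
```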
2024-12-08T11:22:00,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:00,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:00,537 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/7de9919f84454856b18276baa3b136c7 2024-12-08T11:22:00,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/af80974ab0a6477d81ca8d685c3e397f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/af80974ab0a6477d81ca8d685c3e397f 2024-12-08T11:22:00,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/af80974ab0a6477d81ca8d685c3e397f, entries=250, sequenceid=194, filesize=16.5 K 2024-12-08T11:22:00,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/a1851ab42f4047da8891905ca28cfba4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/a1851ab42f4047da8891905ca28cfba4 2024-12-08T11:22:00,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/a1851ab42f4047da8891905ca28cfba4, entries=150, sequenceid=194, filesize=11.9 K 2024-12-08T11:22:00,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/7de9919f84454856b18276baa3b136c7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/7de9919f84454856b18276baa3b136c7 2024-12-08T11:22:00,552 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/7de9919f84454856b18276baa3b136c7, entries=150, sequenceid=194, filesize=11.9 K 2024-12-08T11:22:00,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f908e109f52a6b3eb513553b88d88e13 in 1256ms, sequenceid=194, compaction requested=true 2024-12-08T11:22:00,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:00,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:00,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:00,555 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:00,555 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:00,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:00,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:00,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:00,555 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:00,556 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:00,556 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53592 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:00,556 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/A is initiating minor compaction (all files) 2024-12-08T11:22:00,556 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:22:00,556 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
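Note: after the flush each store (A, B and C) holds four HFiles, which crosses the minor-compaction threshold, so the flusher queues one compaction per store and the exploring policy selects all four files. The thresholds behind the "4 eligible, 16 blocking" selection are ordinary configuration; a sketch with the usual keys, set to what I understand to be their defaults rather than this test's settings:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of store files before a minor compaction is considered (default 3).
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Maximum number of files selected for a single minor compaction (default 10).
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes to the region are blocked once this many files accumulate in a store
    // (the "16 blocking" in the selection log above).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
  }
}
```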
2024-12-08T11:22:00,556 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/db4221266bea412b96e3d3a09cb32b46, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2499288ecc0d45f69fedccdbf030136f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8c681efc56df4db790b74175e21e05e0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/a1851ab42f4047da8891905ca28cfba4] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=47.7 K 2024-12-08T11:22:00,556 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,557 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d9e35e06e25842ef86c836360d49504a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cb93435b5ee640b498490412a6c01b3e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/aab5f2e56bb04d3aa4ee519ecf95adab, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/af80974ab0a6477d81ca8d685c3e397f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=52.3 K 2024-12-08T11:22:00,557 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting db4221266bea412b96e3d3a09cb32b46, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733656912996 2024-12-08T11:22:00,557 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9e35e06e25842ef86c836360d49504a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733656912996 2024-12-08T11:22:00,557 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2499288ecc0d45f69fedccdbf030136f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733656915302 2024-12-08T11:22:00,557 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb93435b5ee640b498490412a6c01b3e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733656915302 2024-12-08T11:22:00,558 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c681efc56df4db790b74175e21e05e0, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733656915950 2024-12-08T11:22:00,558 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting aab5f2e56bb04d3aa4ee519ecf95adab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733656915950 2024-12-08T11:22:00,558 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a1851ab42f4047da8891905ca28cfba4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656918134 2024-12-08T11:22:00,558 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting af80974ab0a6477d81ca8d685c3e397f, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656918128 2024-12-08T11:22:00,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-08T11:22:00,570 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#443 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:00,570 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/f18aecfd391e4a0e8868b4874e7af505 is 50, key is test_row_0/B:col10/1733656918134/Put/seqid=0 2024-12-08T11:22:00,573 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#444 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:00,574 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/4ec3c3721d7d4129b701a08bc1a97ca7 is 50, key is test_row_0/A:col10/1733656918134/Put/seqid=0 2024-12-08T11:22:00,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742348_1524 (size=12289) 2024-12-08T11:22:00,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742349_1525 (size=12289) 2024-12-08T11:22:00,595 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/4ec3c3721d7d4129b701a08bc1a97ca7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4ec3c3721d7d4129b701a08bc1a97ca7 2024-12-08T11:22:00,600 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/f18aecfd391e4a0e8868b4874e7af505 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f18aecfd391e4a0e8868b4874e7af505 2024-12-08T11:22:00,602 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into 4ec3c3721d7d4129b701a08bc1a97ca7(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
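Note: the "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" entries come from ExploringCompactionPolicy, which enumerates contiguous runs of store files and keeps a run only if every file is no larger than the sum of the other files times the compaction ratio, then prefers the candidate that covers the most files for the least I/O. A stripped-down sketch of that ratio check, as an illustration rather than the actual HBase implementation; the byte sizes are rough stand-ins for the store-A files in the log:

```java
import java.util.List;

public class RatioCheck {
  /** True if every file is at most (sum of the other files) * ratio —
   *  the "in ratio" condition reported in the compaction selection log. */
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = 0;
    for (long s : fileSizes) {
      total += s;
    }
    for (long s : fileSizes) {
      if (s > (total - s) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the store-A file sizes from the log: 12.1 K, 11.9 K, 11.9 K, 16.5 K.
    List<Long> sizes = List.of(12390L, 12187L, 12187L, 16897L);
    System.out.println(filesInRatio(sizes, 1.2)); // default hbase.hstore.compaction.ratio is 1.2
  }
}
```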
2024-12-08T11:22:00,602 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:00,602 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=12, startTime=1733656920555; duration=0sec 2024-12-08T11:22:00,602 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:00,602 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:22:00,602 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:00,603 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:00,603 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:22:00,603 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,603 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a3e276c2e340419c938c1d80b7350588, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/44acfde678744b6a86aa48d9559fa402, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4d3347620dc46eb94b4d694ed9b4d33, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/7de9919f84454856b18276baa3b136c7] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=47.7 K 2024-12-08T11:22:00,604 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting a3e276c2e340419c938c1d80b7350588, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1733656912996 2024-12-08T11:22:00,605 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into f18aecfd391e4a0e8868b4874e7af505(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:00,605 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:00,605 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=12, startTime=1733656920555; duration=0sec 2024-12-08T11:22:00,605 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:00,605 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:22:00,608 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44acfde678744b6a86aa48d9559fa402, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1733656915302 2024-12-08T11:22:00,608 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4d3347620dc46eb94b4d694ed9b4d33, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=168, earliestPutTs=1733656915950 2024-12-08T11:22:00,608 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7de9919f84454856b18276baa3b136c7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656918134 2024-12-08T11:22:00,616 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#C#compaction#445 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:00,617 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/b756ebdea0394bdaa2b0f21f8a381c94 is 50, key is test_row_0/C:col10/1733656918134/Put/seqid=0 2024-12-08T11:22:00,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742350_1526 (size=12289) 2024-12-08T11:22:00,627 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/b756ebdea0394bdaa2b0f21f8a381c94 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/b756ebdea0394bdaa2b0f21f8a381c94 2024-12-08T11:22:00,631 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into b756ebdea0394bdaa2b0f21f8a381c94(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
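Note: every flush and compaction output above is first written under the region's .tmp directory and only afterwards "Committed" into the store directory (the HRegionFileSystem(442) entries), so readers never observe a partially written HFile. The underlying idea is an ordinary write-then-rename on HDFS; a generic sketch of the pattern with hypothetical paths modelled on the log's layout, not HBase's actual commit code:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRename {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/newfile");
    Path dst = new Path("/data/default/TestAcidGuarantees/region/A/newfile");
    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeBytes("file contents would go here"); // write the full file under .tmp first
    }
    // Rename is atomic within a single HDFS namespace, so the file becomes visible all at once.
    fs.rename(tmp, dst);
  }
}
```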
2024-12-08T11:22:00,631 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:00,631 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=12, startTime=1733656920555; duration=0sec 2024-12-08T11:22:00,632 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:00,632 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:22:00,679 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:00,679 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-12-08T11:22:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:00,680 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T11:22:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:00,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:00,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/99f5c1d16e934660a0b43a3175c199da is 50, key is test_row_0/A:col10/1733656919321/Put/seqid=0 2024-12-08T11:22:00,688 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742351_1527 (size=12151) 2024-12-08T11:22:01,089 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/99f5c1d16e934660a0b43a3175c199da 2024-12-08T11:22:01,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/ec38f3fbbee947cf90f96ca7fbe19ecc is 50, key is test_row_0/B:col10/1733656919321/Put/seqid=0 2024-12-08T11:22:01,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742352_1528 (size=12151) 2024-12-08T11:22:01,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:01,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:01,500 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/ec38f3fbbee947cf90f96ca7fbe19ecc 2024-12-08T11:22:01,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/2b697930d0794f9c9e8aa33b9d19d799 is 50, key is test_row_0/C:col10/1733656919321/Put/seqid=0 2024-12-08T11:22:01,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742353_1529 (size=12151) 2024-12-08T11:22:01,518 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/2b697930d0794f9c9e8aa33b9d19d799 2024-12-08T11:22:01,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/99f5c1d16e934660a0b43a3175c199da as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/99f5c1d16e934660a0b43a3175c199da 2024-12-08T11:22:01,526 INFO 
[RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/99f5c1d16e934660a0b43a3175c199da, entries=150, sequenceid=207, filesize=11.9 K 2024-12-08T11:22:01,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/ec38f3fbbee947cf90f96ca7fbe19ecc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ec38f3fbbee947cf90f96ca7fbe19ecc 2024-12-08T11:22:01,530 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ec38f3fbbee947cf90f96ca7fbe19ecc, entries=150, sequenceid=207, filesize=11.9 K 2024-12-08T11:22:01,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/2b697930d0794f9c9e8aa33b9d19d799 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2b697930d0794f9c9e8aa33b9d19d799 2024-12-08T11:22:01,535 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2b697930d0794f9c9e8aa33b9d19d799, entries=150, sequenceid=207, filesize=11.9 K 2024-12-08T11:22:01,536 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=127.47 KB/130530 for f908e109f52a6b3eb513553b88d88e13 in 855ms, sequenceid=207, compaction requested=false 2024-12-08T11:22:01,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:01,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:01,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-08T11:22:01,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-12-08T11:22:01,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-08T11:22:01,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0780 sec 2024-12-08T11:22:01,539 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 2.0810 sec 2024-12-08T11:22:01,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:01,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T11:22:01,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:01,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:01,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:01,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:01,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:01,542 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:01,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/e804dff5afd24973afcd0473b515061f is 50, key is test_row_0/A:col10/1733656921540/Put/seqid=0 2024-12-08T11:22:01,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742354_1530 (size=14541) 2024-12-08T11:22:01,555 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/e804dff5afd24973afcd0473b515061f 2024-12-08T11:22:01,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2554754d16894c3f9717740abd8b4c56 is 50, key is test_row_0/B:col10/1733656921540/Put/seqid=0 2024-12-08T11:22:01,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is 
done pid=140 2024-12-08T11:22:01,562 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-08T11:22:01,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:01,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742355_1531 (size=12151) 2024-12-08T11:22:01,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-12-08T11:22:01,566 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:01,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T11:22:01,567 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:01,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:01,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:01,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656981566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:01,577 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:01,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656981572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:01,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T11:22:01,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:01,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656981674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:01,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:01,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656981678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:01,718 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:01,719 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-08T11:22:01,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:01,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:01,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:01,719 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:01,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:01,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:01,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T11:22:01,872 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:01,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-08T11:22:01,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:01,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:01,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:01,873 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:01,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:01,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:01,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:01,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656981884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:01,887 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:01,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656981886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:01,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2554754d16894c3f9717740abd8b4c56 2024-12-08T11:22:01,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/52dca8cceb6643198a0333ab2f88ab32 is 50, key is test_row_0/C:col10/1733656921540/Put/seqid=0 2024-12-08T11:22:02,026 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:02,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-08T11:22:02,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:02,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:02,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:02,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:02,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:02,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:02,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742356_1532 (size=12151) 2024-12-08T11:22:02,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=231 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/52dca8cceb6643198a0333ab2f88ab32 2024-12-08T11:22:02,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/e804dff5afd24973afcd0473b515061f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e804dff5afd24973afcd0473b515061f 2024-12-08T11:22:02,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e804dff5afd24973afcd0473b515061f, entries=200, sequenceid=231, filesize=14.2 K 2024-12-08T11:22:02,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2554754d16894c3f9717740abd8b4c56 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2554754d16894c3f9717740abd8b4c56 2024-12-08T11:22:02,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2554754d16894c3f9717740abd8b4c56, entries=150, sequenceid=231, filesize=11.9 K 2024-12-08T11:22:02,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/52dca8cceb6643198a0333ab2f88ab32 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/52dca8cceb6643198a0333ab2f88ab32 2024-12-08T11:22:02,067 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/52dca8cceb6643198a0333ab2f88ab32, entries=150, sequenceid=231, filesize=11.9 K 2024-12-08T11:22:02,067 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for f908e109f52a6b3eb513553b88d88e13 in 527ms, sequenceid=231, compaction requested=true 2024-12-08T11:22:02,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:02,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:02,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:02,068 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:02,068 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:02,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:02,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:02,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:02,068 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:02,069 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38981 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:02,069 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/A is initiating minor 
compaction (all files) 2024-12-08T11:22:02,069 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:02,069 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4ec3c3721d7d4129b701a08bc1a97ca7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/99f5c1d16e934660a0b43a3175c199da, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e804dff5afd24973afcd0473b515061f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=38.1 K 2024-12-08T11:22:02,070 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36591 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:02,070 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:22:02,071 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:02,071 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f18aecfd391e4a0e8868b4874e7af505, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ec38f3fbbee947cf90f96ca7fbe19ecc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2554754d16894c3f9717740abd8b4c56] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=35.7 K 2024-12-08T11:22:02,071 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ec3c3721d7d4129b701a08bc1a97ca7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656918134 2024-12-08T11:22:02,071 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f18aecfd391e4a0e8868b4874e7af505, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656918134 2024-12-08T11:22:02,071 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99f5c1d16e934660a0b43a3175c199da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733656919314 2024-12-08T11:22:02,072 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ec38f3fbbee947cf90f96ca7fbe19ecc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733656919314 2024-12-08T11:22:02,072 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting e804dff5afd24973afcd0473b515061f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733656921530 2024-12-08T11:22:02,072 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2554754d16894c3f9717740abd8b4c56, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733656921530 2024-12-08T11:22:02,094 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#452 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:02,094 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/b716bef6ed584049bfd989d6daf1edeb is 50, key is test_row_0/B:col10/1733656921540/Put/seqid=0 2024-12-08T11:22:02,097 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#453 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:02,097 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/e940da723ca24ef8bf277599626c8fd5 is 50, key is test_row_0/A:col10/1733656921540/Put/seqid=0 2024-12-08T11:22:02,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742357_1533 (size=12391) 2024-12-08T11:22:02,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T11:22:02,181 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:02,182 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-12-08T11:22:02,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:02,182 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:22:02,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:02,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:02,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:02,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:02,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:02,183 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:02,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742358_1534 (size=12391) 2024-12-08T11:22:02,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/d6245f245db7496cbcaf7b226bbcb6e8 is 50, key is 
test_row_0/A:col10/1733656921564/Put/seqid=0 2024-12-08T11:22:02,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:02,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:02,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742359_1535 (size=12151) 2024-12-08T11:22:02,222 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/d6245f245db7496cbcaf7b226bbcb6e8 2024-12-08T11:22:02,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/fb5c2b062dbf44df8c54154c990a6868 is 50, key is test_row_0/B:col10/1733656921564/Put/seqid=0 2024-12-08T11:22:02,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742360_1536 (size=12151) 2024-12-08T11:22:02,238 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/fb5c2b062dbf44df8c54154c990a6868 2024-12-08T11:22:02,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a1d52907892c475a994ba87ba3f746a0 is 50, key is test_row_0/C:col10/1733656921564/Put/seqid=0 2024-12-08T11:22:02,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742361_1537 (size=12151) 2024-12-08T11:22:02,316 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:02,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656982309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:02,316 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:02,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656982311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:02,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:02,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656982417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:02,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:02,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656982417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:02,547 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/b716bef6ed584049bfd989d6daf1edeb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b716bef6ed584049bfd989d6daf1edeb 2024-12-08T11:22:02,551 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into b716bef6ed584049bfd989d6daf1edeb(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:02,551 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:02,551 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=13, startTime=1733656922068; duration=0sec 2024-12-08T11:22:02,551 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:02,551 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:22:02,551 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:02,552 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36591 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:02,552 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:22:02,552 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:02,553 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/b756ebdea0394bdaa2b0f21f8a381c94, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2b697930d0794f9c9e8aa33b9d19d799, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/52dca8cceb6643198a0333ab2f88ab32] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=35.7 K 2024-12-08T11:22:02,553 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b756ebdea0394bdaa2b0f21f8a381c94, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1733656918134 2024-12-08T11:22:02,554 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b697930d0794f9c9e8aa33b9d19d799, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1733656919314 2024-12-08T11:22:02,554 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 52dca8cceb6643198a0333ab2f88ab32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733656921530 2024-12-08T11:22:02,562 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
f908e109f52a6b3eb513553b88d88e13#C#compaction#457 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:02,563 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4dae68602e7e408faccd4aafe10ca4af is 50, key is test_row_0/C:col10/1733656921540/Put/seqid=0 2024-12-08T11:22:02,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742362_1538 (size=12391) 2024-12-08T11:22:02,574 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4dae68602e7e408faccd4aafe10ca4af as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4dae68602e7e408faccd4aafe10ca4af 2024-12-08T11:22:02,579 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into 4dae68602e7e408faccd4aafe10ca4af(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:02,580 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:02,580 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=13, startTime=1733656922068; duration=0sec 2024-12-08T11:22:02,580 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:02,580 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:22:02,594 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/e940da723ca24ef8bf277599626c8fd5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e940da723ca24ef8bf277599626c8fd5 2024-12-08T11:22:02,601 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into e940da723ca24ef8bf277599626c8fd5(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:02,601 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:02,601 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=13, startTime=1733656922068; duration=0sec 2024-12-08T11:22:02,601 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:02,601 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:22:02,626 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:02,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656982624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:02,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:02,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656982624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:02,650 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=243 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a1d52907892c475a994ba87ba3f746a0 2024-12-08T11:22:02,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/d6245f245db7496cbcaf7b226bbcb6e8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d6245f245db7496cbcaf7b226bbcb6e8 2024-12-08T11:22:02,664 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d6245f245db7496cbcaf7b226bbcb6e8, entries=150, sequenceid=243, filesize=11.9 K 2024-12-08T11:22:02,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/fb5c2b062dbf44df8c54154c990a6868 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/fb5c2b062dbf44df8c54154c990a6868 2024-12-08T11:22:02,669 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/fb5c2b062dbf44df8c54154c990a6868, entries=150, sequenceid=243, filesize=11.9 K 2024-12-08T11:22:02,670 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T11:22:02,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a1d52907892c475a994ba87ba3f746a0 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a1d52907892c475a994ba87ba3f746a0 2024-12-08T11:22:02,675 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a1d52907892c475a994ba87ba3f746a0, entries=150, sequenceid=243, filesize=11.9 K 2024-12-08T11:22:02,675 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f908e109f52a6b3eb513553b88d88e13 in 493ms, sequenceid=243, compaction requested=false 2024-12-08T11:22:02,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:02,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:02,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-08T11:22:02,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-08T11:22:02,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-08T11:22:02,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1100 sec 2024-12-08T11:22:02,680 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.1150 sec 2024-12-08T11:22:02,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:02,931 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:22:02,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:02,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:02,931 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:02,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:02,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:02,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:02,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/780c8ef4ed754d9dbc060870fb093e96 is 50, key is test_row_0/A:col10/1733656922310/Put/seqid=0 2024-12-08T11:22:02,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742363_1539 (size=14741) 2024-12-08T11:22:02,942 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/780c8ef4ed754d9dbc060870fb093e96 2024-12-08T11:22:02,949 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/5efda39330d341219bd524839b13e196 is 50, key is test_row_0/B:col10/1733656922310/Put/seqid=0 2024-12-08T11:22:02,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:02,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656982949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:02,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:02,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742364_1540 (size=12301) 2024-12-08T11:22:02,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656982949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656983051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,059 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656983054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656983254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656983260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/5efda39330d341219bd524839b13e196 2024-12-08T11:22:03,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/bcac9ca1d2574fdb9e79ae4f816b2361 is 50, key is test_row_0/C:col10/1733656922310/Put/seqid=0 2024-12-08T11:22:03,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742365_1541 (size=12301) 2024-12-08T11:22:03,364 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/bcac9ca1d2574fdb9e79ae4f816b2361 2024-12-08T11:22:03,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/780c8ef4ed754d9dbc060870fb093e96 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/780c8ef4ed754d9dbc060870fb093e96 2024-12-08T11:22:03,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/780c8ef4ed754d9dbc060870fb093e96, entries=200, sequenceid=272, filesize=14.4 K 2024-12-08T11:22:03,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/5efda39330d341219bd524839b13e196 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/5efda39330d341219bd524839b13e196 2024-12-08T11:22:03,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/5efda39330d341219bd524839b13e196, entries=150, sequenceid=272, filesize=12.0 K 2024-12-08T11:22:03,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/bcac9ca1d2574fdb9e79ae4f816b2361 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bcac9ca1d2574fdb9e79ae4f816b2361 2024-12-08T11:22:03,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bcac9ca1d2574fdb9e79ae4f816b2361, entries=150, sequenceid=272, filesize=12.0 K 2024-12-08T11:22:03,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f908e109f52a6b3eb513553b88d88e13 in 448ms, sequenceid=272, compaction requested=true 2024-12-08T11:22:03,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:03,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:03,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:03,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:03,379 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:03,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:03,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:03,379 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:03,379 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:03,380 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36843 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:03,380 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:03,381 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:22:03,381 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/A is initiating minor compaction (all files) 2024-12-08T11:22:03,381 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:03,381 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:03,381 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e940da723ca24ef8bf277599626c8fd5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d6245f245db7496cbcaf7b226bbcb6e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/780c8ef4ed754d9dbc060870fb093e96] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=38.4 K 2024-12-08T11:22:03,381 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b716bef6ed584049bfd989d6daf1edeb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/fb5c2b062dbf44df8c54154c990a6868, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/5efda39330d341219bd524839b13e196] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=36.0 K 2024-12-08T11:22:03,381 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting e940da723ca24ef8bf277599626c8fd5, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733656921530 2024-12-08T11:22:03,382 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b716bef6ed584049bfd989d6daf1edeb, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733656921530 2024-12-08T11:22:03,382 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6245f245db7496cbcaf7b226bbcb6e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733656921557 2024-12-08T11:22:03,382 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting fb5c2b062dbf44df8c54154c990a6868, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733656921557 2024-12-08T11:22:03,382 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 780c8ef4ed754d9dbc060870fb093e96, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733656922303 2024-12-08T11:22:03,382 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5efda39330d341219bd524839b13e196, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733656922310 2024-12-08T11:22:03,388 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#461 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:03,388 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#462 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:03,388 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/52134c0332b04c64bb2778d0fa6d621c is 50, key is test_row_0/A:col10/1733656922310/Put/seqid=0 2024-12-08T11:22:03,389 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/847e168d1e7542cd8bcee808be55e80f is 50, key is test_row_0/B:col10/1733656922310/Put/seqid=0 2024-12-08T11:22:03,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742366_1542 (size=12643) 2024-12-08T11:22:03,398 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/847e168d1e7542cd8bcee808be55e80f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/847e168d1e7542cd8bcee808be55e80f 2024-12-08T11:22:03,404 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into 847e168d1e7542cd8bcee808be55e80f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:03,404 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:03,404 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=13, startTime=1733656923379; duration=0sec 2024-12-08T11:22:03,404 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:03,404 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:22:03,404 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:03,405 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36843 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:03,405 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:22:03,405 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:03,405 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4dae68602e7e408faccd4aafe10ca4af, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a1d52907892c475a994ba87ba3f746a0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bcac9ca1d2574fdb9e79ae4f816b2361] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=36.0 K 2024-12-08T11:22:03,406 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dae68602e7e408faccd4aafe10ca4af, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=231, earliestPutTs=1733656921530 2024-12-08T11:22:03,406 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a1d52907892c475a994ba87ba3f746a0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=243, earliestPutTs=1733656921557 2024-12-08T11:22:03,406 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting bcac9ca1d2574fdb9e79ae4f816b2361, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733656922310 2024-12-08T11:22:03,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 
is added to blk_1073742367_1543 (size=12643) 2024-12-08T11:22:03,415 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/52134c0332b04c64bb2778d0fa6d621c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/52134c0332b04c64bb2778d0fa6d621c 2024-12-08T11:22:03,417 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#C#compaction#463 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:03,417 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4cfb10264126460abff8b29343914286 is 50, key is test_row_0/C:col10/1733656922310/Put/seqid=0 2024-12-08T11:22:03,421 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into 52134c0332b04c64bb2778d0fa6d621c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:03,421 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:03,421 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=13, startTime=1733656923379; duration=0sec 2024-12-08T11:22:03,421 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:03,422 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:22:03,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742368_1544 (size=12643) 2024-12-08T11:22:03,443 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4cfb10264126460abff8b29343914286 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4cfb10264126460abff8b29343914286 2024-12-08T11:22:03,447 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into 4cfb10264126460abff8b29343914286(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:03,447 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:03,447 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=13, startTime=1733656923379; duration=0sec 2024-12-08T11:22:03,447 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:03,447 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:22:03,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:03,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:22:03,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:03,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:03,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:03,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:03,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:03,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:03,566 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/09773055b49847bfa8ce5d702eb2e318 is 50, key is test_row_0/A:col10/1733656923560/Put/seqid=0 2024-12-08T11:22:03,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742369_1545 (size=14741) 2024-12-08T11:22:03,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/09773055b49847bfa8ce5d702eb2e318 2024-12-08T11:22:03,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/430f8a4f532a4f83971c4ce06321ccea is 50, key is test_row_0/B:col10/1733656923560/Put/seqid=0 2024-12-08T11:22:03,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742370_1546 (size=12301) 
2024-12-08T11:22:03,601 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/430f8a4f532a4f83971c4ce06321ccea 2024-12-08T11:22:03,607 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0b3addac640e442ab0b9c8a643b57b37 is 50, key is test_row_0/C:col10/1733656923560/Put/seqid=0 2024-12-08T11:22:03,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742371_1547 (size=12301) 2024-12-08T11:22:03,646 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656983639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656983646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-12-08T11:22:03,673 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-12-08T11:22:03,674 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:03,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-12-08T11:22:03,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T11:22:03,676 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:03,676 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:03,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:03,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656983748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656983754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T11:22:03,828 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,828 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-08T11:22:03,828 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:03,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:03,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:03,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:03,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:03,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:03,956 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656983952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:03,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656983960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T11:22:03,981 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:03,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-08T11:22:03,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:03,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:03,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:03,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:03,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:03,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:04,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0b3addac640e442ab0b9c8a643b57b37 2024-12-08T11:22:04,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/09773055b49847bfa8ce5d702eb2e318 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/09773055b49847bfa8ce5d702eb2e318 2024-12-08T11:22:04,025 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/09773055b49847bfa8ce5d702eb2e318, entries=200, sequenceid=286, filesize=14.4 K 2024-12-08T11:22:04,027 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/430f8a4f532a4f83971c4ce06321ccea as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/430f8a4f532a4f83971c4ce06321ccea 2024-12-08T11:22:04,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/430f8a4f532a4f83971c4ce06321ccea, entries=150, sequenceid=286, filesize=12.0 K 2024-12-08T11:22:04,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0b3addac640e442ab0b9c8a643b57b37 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0b3addac640e442ab0b9c8a643b57b37 2024-12-08T11:22:04,034 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0b3addac640e442ab0b9c8a643b57b37, entries=150, sequenceid=286, filesize=12.0 K 2024-12-08T11:22:04,035 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f908e109f52a6b3eb513553b88d88e13 in 473ms, sequenceid=286, compaction requested=false 2024-12-08T11:22:04,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:04,133 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-12-08T11:22:04,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:04,134 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:22:04,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:04,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:04,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:04,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:04,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:04,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:04,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/2d60cf74b0a0418294fdf19404108640 is 50, key is test_row_0/A:col10/1733656923612/Put/seqid=0 2024-12-08T11:22:04,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742372_1548 (size=12301) 2024-12-08T11:22:04,143 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/2d60cf74b0a0418294fdf19404108640 2024-12-08T11:22:04,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/de434c72b89a459dbce809b47d505df7 is 50, key is test_row_0/B:col10/1733656923612/Put/seqid=0 2024-12-08T11:22:04,154 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742373_1549 (size=12301) 2024-12-08T11:22:04,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:04,262 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:04,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T11:22:04,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656984286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656984287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656984395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656984396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,556 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/de434c72b89a459dbce809b47d505df7 2024-12-08T11:22:04,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/740bae1834584f9fa1eff6c23294a97c is 50, key is test_row_0/C:col10/1733656923612/Put/seqid=0 2024-12-08T11:22:04,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742374_1550 (size=12301) 2024-12-08T11:22:04,567 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/740bae1834584f9fa1eff6c23294a97c 2024-12-08T11:22:04,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/2d60cf74b0a0418294fdf19404108640 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d60cf74b0a0418294fdf19404108640 2024-12-08T11:22:04,573 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d60cf74b0a0418294fdf19404108640, entries=150, sequenceid=311, filesize=12.0 K 2024-12-08T11:22:04,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/de434c72b89a459dbce809b47d505df7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de434c72b89a459dbce809b47d505df7 2024-12-08T11:22:04,577 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de434c72b89a459dbce809b47d505df7, entries=150, sequenceid=311, filesize=12.0 K 2024-12-08T11:22:04,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/740bae1834584f9fa1eff6c23294a97c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/740bae1834584f9fa1eff6c23294a97c 2024-12-08T11:22:04,581 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/740bae1834584f9fa1eff6c23294a97c, entries=150, sequenceid=311, filesize=12.0 K 2024-12-08T11:22:04,582 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f908e109f52a6b3eb513553b88d88e13 in 448ms, sequenceid=311, compaction requested=true 2024-12-08T11:22:04,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:04,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:04,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-08T11:22:04,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-08T11:22:04,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-08T11:22:04,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 907 msec 2024-12-08T11:22:04,586 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 911 msec 2024-12-08T11:22:04,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:04,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:22:04,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:04,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:04,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:04,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:04,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:04,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:04,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/da5b345c8f4f428e85f3ea4d56fb845b is 50, key is test_row_0/A:col10/1733656924286/Put/seqid=0 2024-12-08T11:22:04,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742375_1551 (size=12301) 2024-12-08T11:22:04,609 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/da5b345c8f4f428e85f3ea4d56fb845b 2024-12-08T11:22:04,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/1cb8a20404ae41e1a9c0094847ea4bb1 is 50, key is test_row_0/B:col10/1733656924286/Put/seqid=0 2024-12-08T11:22:04,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742376_1552 
(size=12301) 2024-12-08T11:22:04,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656984668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656984669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-08T11:22:04,779 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-08T11:22:04,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,780 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:04,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656984775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,780 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656984775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-12-08T11:22:04,782 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:04,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T11:22:04,782 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:04,782 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:04,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T11:22:04,934 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-08T11:22:04,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 
{event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:04,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:04,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:04,935 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:04,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:04,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:04,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656984981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:04,986 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:04,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656984982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,018 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/1cb8a20404ae41e1a9c0094847ea4bb1 2024-12-08T11:22:05,025 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0793560afad44d9da250b63d37bea9e3 is 50, key is test_row_0/C:col10/1733656924286/Put/seqid=0 2024-12-08T11:22:05,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742377_1553 (size=12301) 2024-12-08T11:22:05,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T11:22:05,086 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-08T11:22:05,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:05,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:05,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:05,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:05,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:05,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:05,239 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-08T11:22:05,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:05,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:05,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:05,240 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:05,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:05,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:05,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:05,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656985286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:05,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656985289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T11:22:05,393 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-08T11:22:05,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:05,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:05,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:05,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:05,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:05,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:05,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=323 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0793560afad44d9da250b63d37bea9e3 2024-12-08T11:22:05,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/da5b345c8f4f428e85f3ea4d56fb845b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/da5b345c8f4f428e85f3ea4d56fb845b 2024-12-08T11:22:05,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/da5b345c8f4f428e85f3ea4d56fb845b, entries=150, sequenceid=323, filesize=12.0 K 2024-12-08T11:22:05,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/1cb8a20404ae41e1a9c0094847ea4bb1 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1cb8a20404ae41e1a9c0094847ea4bb1 2024-12-08T11:22:05,440 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1cb8a20404ae41e1a9c0094847ea4bb1, entries=150, sequenceid=323, filesize=12.0 K 2024-12-08T11:22:05,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0793560afad44d9da250b63d37bea9e3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0793560afad44d9da250b63d37bea9e3 2024-12-08T11:22:05,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0793560afad44d9da250b63d37bea9e3, entries=150, sequenceid=323, filesize=12.0 K 2024-12-08T11:22:05,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f908e109f52a6b3eb513553b88d88e13 in 843ms, sequenceid=323, compaction requested=true 2024-12-08T11:22:05,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:05,444 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:05,445 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-12-08T11:22:05,445 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51986 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:05,445 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/A is initiating minor compaction (all files) 2024-12-08T11:22:05,446 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:05,446 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/52134c0332b04c64bb2778d0fa6d621c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/09773055b49847bfa8ce5d702eb2e318, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d60cf74b0a0418294fdf19404108640, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/da5b345c8f4f428e85f3ea4d56fb845b] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=50.8 K 2024-12-08T11:22:05,446 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52134c0332b04c64bb2778d0fa6d621c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733656922310 2024-12-08T11:22:05,447 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09773055b49847bfa8ce5d702eb2e318, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733656922937 2024-12-08T11:22:05,447 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d60cf74b0a0418294fdf19404108640, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733656923612 2024-12-08T11:22:05,447 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting da5b345c8f4f428e85f3ea4d56fb845b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1733656924282 2024-12-08T11:22:05,448 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:05,449 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49546 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:05,449 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:22:05,449 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:05,449 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/847e168d1e7542cd8bcee808be55e80f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/430f8a4f532a4f83971c4ce06321ccea, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de434c72b89a459dbce809b47d505df7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1cb8a20404ae41e1a9c0094847ea4bb1] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=48.4 K 2024-12-08T11:22:05,450 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 847e168d1e7542cd8bcee808be55e80f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733656922310 2024-12-08T11:22:05,450 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 430f8a4f532a4f83971c4ce06321ccea, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733656922937 2024-12-08T11:22:05,450 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting de434c72b89a459dbce809b47d505df7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733656923612 2024-12-08T11:22:05,451 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cb8a20404ae41e1a9c0094847ea4bb1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1733656924282 2024-12-08T11:22:05,454 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#473 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:05,454 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/53906e259aec4dfa85dd95d1997e2c06 is 50, key is test_row_0/A:col10/1733656924286/Put/seqid=0 2024-12-08T11:22:05,458 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#474 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:05,459 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/8094ae3ded064649823bbd2306ede7ec is 50, key is test_row_0/B:col10/1733656924286/Put/seqid=0 2024-12-08T11:22:05,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742378_1554 (size=12779) 2024-12-08T11:22:05,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742379_1555 (size=12779) 2024-12-08T11:22:05,545 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-12-08T11:22:05,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:05,546 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T11:22:05,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:05,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:05,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:05,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:05,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:05,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:05,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/751f3af06495449d9193aef13bbd3fcf is 50, key is test_row_0/A:col10/1733656924668/Put/seqid=0 2024-12-08T11:22:05,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742380_1556 
(size=12301) 2024-12-08T11:22:05,555 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/751f3af06495449d9193aef13bbd3fcf 2024-12-08T11:22:05,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/770e5fcd98e9412ea4342bb79c5bfc52 is 50, key is test_row_0/B:col10/1733656924668/Put/seqid=0 2024-12-08T11:22:05,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742381_1557 (size=12301) 2024-12-08T11:22:05,567 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/770e5fcd98e9412ea4342bb79c5bfc52 2024-12-08T11:22:05,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/8a0fb136460c4e7cb3a49615db2d2656 is 50, key is test_row_0/C:col10/1733656924668/Put/seqid=0 2024-12-08T11:22:05,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742382_1558 (size=12301) 2024-12-08T11:22:05,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:05,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:05,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:05,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656985821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:05,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656985825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,870 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/53906e259aec4dfa85dd95d1997e2c06 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/53906e259aec4dfa85dd95d1997e2c06 2024-12-08T11:22:05,876 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into 53906e259aec4dfa85dd95d1997e2c06(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:05,876 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:05,876 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=12, startTime=1733656925444; duration=0sec 2024-12-08T11:22:05,877 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:05,877 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:22:05,877 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:05,878 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49546 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:05,878 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:22:05,878 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:05,878 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4cfb10264126460abff8b29343914286, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0b3addac640e442ab0b9c8a643b57b37, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/740bae1834584f9fa1eff6c23294a97c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0793560afad44d9da250b63d37bea9e3] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=48.4 K 2024-12-08T11:22:05,879 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cfb10264126460abff8b29343914286, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1733656922310 2024-12-08T11:22:05,880 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b3addac640e442ab0b9c8a643b57b37, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1733656922937 2024-12-08T11:22:05,881 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 740bae1834584f9fa1eff6c23294a97c, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1733656923612 2024-12-08T11:22:05,881 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/8094ae3ded064649823bbd2306ede7ec as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8094ae3ded064649823bbd2306ede7ec 2024-12-08T11:22:05,881 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0793560afad44d9da250b63d37bea9e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1733656924282 2024-12-08T11:22:05,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T11:22:05,887 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into 8094ae3ded064649823bbd2306ede7ec(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:05,887 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:05,887 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=12, startTime=1733656925445; duration=0sec 2024-12-08T11:22:05,887 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:05,887 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:22:05,892 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#C#compaction#478 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:05,893 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/80f28c7d3024486c8eaca6c0b0e9a851 is 50, key is test_row_0/C:col10/1733656924286/Put/seqid=0 2024-12-08T11:22:05,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742383_1559 (size=12779) 2024-12-08T11:22:05,916 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/80f28c7d3024486c8eaca6c0b0e9a851 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/80f28c7d3024486c8eaca6c0b0e9a851 2024-12-08T11:22:05,924 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into 80f28c7d3024486c8eaca6c0b0e9a851(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:05,924 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:05,924 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=12, startTime=1733656925445; duration=0sec 2024-12-08T11:22:05,924 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:05,924 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:22:05,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:05,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656985927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,936 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:05,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656985933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:05,977 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=347 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/8a0fb136460c4e7cb3a49615db2d2656 2024-12-08T11:22:05,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/751f3af06495449d9193aef13bbd3fcf as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/751f3af06495449d9193aef13bbd3fcf 2024-12-08T11:22:05,986 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/751f3af06495449d9193aef13bbd3fcf, entries=150, sequenceid=347, filesize=12.0 K 2024-12-08T11:22:05,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/770e5fcd98e9412ea4342bb79c5bfc52 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/770e5fcd98e9412ea4342bb79c5bfc52 2024-12-08T11:22:05,990 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/770e5fcd98e9412ea4342bb79c5bfc52, entries=150, sequenceid=347, filesize=12.0 K 2024-12-08T11:22:05,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/8a0fb136460c4e7cb3a49615db2d2656 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/8a0fb136460c4e7cb3a49615db2d2656 2024-12-08T11:22:05,994 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/8a0fb136460c4e7cb3a49615db2d2656, entries=150, sequenceid=347, filesize=12.0 K 2024-12-08T11:22:05,994 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for f908e109f52a6b3eb513553b88d88e13 in 448ms, sequenceid=347, compaction requested=false 2024-12-08T11:22:05,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:05,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:05,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-12-08T11:22:05,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-12-08T11:22:05,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-08T11:22:05,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2130 sec 2024-12-08T11:22:05,998 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.2170 sec 2024-12-08T11:22:06,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:06,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T11:22:06,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:06,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:06,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:06,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:06,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:06,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:06,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/6da2d5e4f22f4258bd95a3a70bd60284 is 50, key is test_row_0/A:col10/1733656925813/Put/seqid=0 2024-12-08T11:22:06,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742384_1560 (size=14741) 2024-12-08T11:22:06,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/6da2d5e4f22f4258bd95a3a70bd60284 2024-12-08T11:22:06,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/b0382b324db141d097c969a92d38894b is 50, key is test_row_0/B:col10/1733656925813/Put/seqid=0 2024-12-08T11:22:06,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742385_1561 (size=12301) 2024-12-08T11:22:06,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/b0382b324db141d097c969a92d38894b 2024-12-08T11:22:06,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0a5f9456fb7244518434158e819382d3 is 50, key is test_row_0/C:col10/1733656925813/Put/seqid=0 2024-12-08T11:22:06,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742386_1562 (size=12301) 2024-12-08T11:22:06,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=363 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0a5f9456fb7244518434158e819382d3 2024-12-08T11:22:06,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/6da2d5e4f22f4258bd95a3a70bd60284 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/6da2d5e4f22f4258bd95a3a70bd60284 2024-12-08T11:22:06,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/6da2d5e4f22f4258bd95a3a70bd60284, entries=200, sequenceid=363, filesize=14.4 K 2024-12-08T11:22:06,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/b0382b324db141d097c969a92d38894b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b0382b324db141d097c969a92d38894b 2024-12-08T11:22:06,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b0382b324db141d097c969a92d38894b, entries=150, sequenceid=363, filesize=12.0 K 2024-12-08T11:22:06,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/0a5f9456fb7244518434158e819382d3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0a5f9456fb7244518434158e819382d3 2024-12-08T11:22:06,197 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0a5f9456fb7244518434158e819382d3, entries=150, sequenceid=363, filesize=12.0 K 2024-12-08T11:22:06,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for f908e109f52a6b3eb513553b88d88e13 in 65ms, sequenceid=363, compaction requested=true 2024-12-08T11:22:06,198 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:06,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:06,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:06,198 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:06,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:06,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:06,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:06,198 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:06,198 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:06,198 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39821 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:06,199 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:06,199 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/A is initiating minor compaction (all files) 2024-12-08T11:22:06,199 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:22:06,199 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:06,199 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:06,199 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/53906e259aec4dfa85dd95d1997e2c06, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/751f3af06495449d9193aef13bbd3fcf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/6da2d5e4f22f4258bd95a3a70bd60284] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=38.9 K 2024-12-08T11:22:06,199 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8094ae3ded064649823bbd2306ede7ec, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/770e5fcd98e9412ea4342bb79c5bfc52, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b0382b324db141d097c969a92d38894b] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=36.5 K 2024-12-08T11:22:06,199 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 53906e259aec4dfa85dd95d1997e2c06, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1733656924282 2024-12-08T11:22:06,199 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 8094ae3ded064649823bbd2306ede7ec, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1733656924282 2024-12-08T11:22:06,199 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 751f3af06495449d9193aef13bbd3fcf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733656924637 2024-12-08T11:22:06,199 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 770e5fcd98e9412ea4342bb79c5bfc52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733656924637 2024-12-08T11:22:06,199 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b0382b324db141d097c969a92d38894b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733656925813 2024-12-08T11:22:06,199 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6da2d5e4f22f4258bd95a3a70bd60284, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733656925813 2024-12-08T11:22:06,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:06,201 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-08T11:22:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:06,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:06,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/e6b3e8ee31d247458dc7026095d8c427 is 50, key is test_row_0/A:col10/1733656926199/Put/seqid=0 2024-12-08T11:22:06,206 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#483 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:06,206 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/b57a2b58278b4d1689b30b3a8591af23 is 50, key is test_row_0/B:col10/1733656925813/Put/seqid=0 2024-12-08T11:22:06,207 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#484 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:06,207 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/828f6dc57fed4a5e978aab5ef455480b is 50, key is test_row_0/A:col10/1733656925813/Put/seqid=0 2024-12-08T11:22:06,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742388_1564 (size=12881) 2024-12-08T11:22:06,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742389_1565 (size=12881) 2024-12-08T11:22:06,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742387_1563 (size=14741) 2024-12-08T11:22:06,220 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/828f6dc57fed4a5e978aab5ef455480b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/828f6dc57fed4a5e978aab5ef455480b 2024-12-08T11:22:06,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/e6b3e8ee31d247458dc7026095d8c427 2024-12-08T11:22:06,223 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/b57a2b58278b4d1689b30b3a8591af23 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b57a2b58278b4d1689b30b3a8591af23 2024-12-08T11:22:06,225 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into 828f6dc57fed4a5e978aab5ef455480b(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:06,225 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:06,225 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=13, startTime=1733656926198; duration=0sec 2024-12-08T11:22:06,225 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:06,225 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:22:06,225 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:06,226 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:06,226 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:22:06,226 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:06,226 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/80f28c7d3024486c8eaca6c0b0e9a851, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/8a0fb136460c4e7cb3a49615db2d2656, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0a5f9456fb7244518434158e819382d3] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=36.5 K 2024-12-08T11:22:06,228 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80f28c7d3024486c8eaca6c0b0e9a851, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=323, earliestPutTs=1733656924282 2024-12-08T11:22:06,232 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a0fb136460c4e7cb3a49615db2d2656, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=347, earliestPutTs=1733656924637 2024-12-08T11:22:06,232 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a5f9456fb7244518434158e819382d3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733656925813 2024-12-08T11:22:06,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/e3e3bbdc049944fcae7be3655958c7be is 50, key is test_row_0/B:col10/1733656926199/Put/seqid=0 2024-12-08T11:22:06,234 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into b57a2b58278b4d1689b30b3a8591af23(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:06,234 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:06,234 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=13, startTime=1733656926198; duration=0sec 2024-12-08T11:22:06,234 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:06,234 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:22:06,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742390_1566 (size=12301) 2024-12-08T11:22:06,240 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#C#compaction#486 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:06,240 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/f4b92b7aa76643bc85ff015cad3e64dc is 50, key is test_row_0/C:col10/1733656925813/Put/seqid=0 2024-12-08T11:22:06,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742391_1567 (size=12881) 2024-12-08T11:22:06,244 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:06,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656986234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:06,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:06,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656986239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:06,247 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/f4b92b7aa76643bc85ff015cad3e64dc as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4b92b7aa76643bc85ff015cad3e64dc 2024-12-08T11:22:06,252 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into f4b92b7aa76643bc85ff015cad3e64dc(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:06,252 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:06,252 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=13, startTime=1733656926198; duration=0sec 2024-12-08T11:22:06,252 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:06,252 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:22:06,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:06,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656986345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:06,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:06,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656986347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:06,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:06,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:06,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656986551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:06,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656986552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:06,639 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/e3e3bbdc049944fcae7be3655958c7be 2024-12-08T11:22:06,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/3c656caeef16470098f28417d0acf82b is 50, key is test_row_0/C:col10/1733656926199/Put/seqid=0 2024-12-08T11:22:06,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742392_1568 (size=12301) 2024-12-08T11:22:06,860 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:06,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656986857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:06,861 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:06,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656986857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:06,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-12-08T11:22:06,886 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-12-08T11:22:06,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:06,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-12-08T11:22:06,888 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:06,889 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:06,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-08T11:22:06,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:06,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-08T11:22:07,040 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:07,041 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-08T11:22:07,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:07,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:07,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:07,041 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:07,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:07,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:07,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=387 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/3c656caeef16470098f28417d0acf82b 2024-12-08T11:22:07,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/e6b3e8ee31d247458dc7026095d8c427 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e6b3e8ee31d247458dc7026095d8c427 2024-12-08T11:22:07,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e6b3e8ee31d247458dc7026095d8c427, entries=200, sequenceid=387, filesize=14.4 K 2024-12-08T11:22:07,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/e3e3bbdc049944fcae7be3655958c7be as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e3e3bbdc049944fcae7be3655958c7be 2024-12-08T11:22:07,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e3e3bbdc049944fcae7be3655958c7be, entries=150, sequenceid=387, filesize=12.0 K 2024-12-08T11:22:07,066 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/3c656caeef16470098f28417d0acf82b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3c656caeef16470098f28417d0acf82b 2024-12-08T11:22:07,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3c656caeef16470098f28417d0acf82b, entries=150, sequenceid=387, filesize=12.0 K 2024-12-08T11:22:07,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for f908e109f52a6b3eb513553b88d88e13 in 868ms, sequenceid=387, compaction requested=false 2024-12-08T11:22:07,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:07,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-08T11:22:07,193 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:07,194 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-12-08T11:22:07,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:07,194 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:22:07,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:07,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:07,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:07,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:07,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:07,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:07,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/1262a4be36e8445fa6c58c290400109c is 50, key is test_row_0/A:col10/1733656926233/Put/seqid=0 2024-12-08T11:22:07,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742393_1569 (size=12301) 2024-12-08T11:22:07,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:07,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:07,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:07,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656987422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:07,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:07,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656987423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:07,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-08T11:22:07,530 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:07,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656987527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:07,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:07,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 273 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656987528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:07,602 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/1262a4be36e8445fa6c58c290400109c 2024-12-08T11:22:07,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/1836a6b7274045be8ad2e58acd733807 is 50, key is test_row_0/B:col10/1733656926233/Put/seqid=0 2024-12-08T11:22:07,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742394_1570 (size=12301) 2024-12-08T11:22:07,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:07,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656987732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:07,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:07,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 269 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656987732, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:07,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-08T11:22:08,013 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/1836a6b7274045be8ad2e58acd733807 2024-12-08T11:22:08,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/08a9de36af084b04a8c7471073f2078c is 50, key is test_row_0/C:col10/1733656926233/Put/seqid=0 2024-12-08T11:22:08,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742395_1571 (size=12301) 2024-12-08T11:22:08,033 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=402 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/08a9de36af084b04a8c7471073f2078c 2024-12-08T11:22:08,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/1262a4be36e8445fa6c58c290400109c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/1262a4be36e8445fa6c58c290400109c 2024-12-08T11:22:08,040 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/1262a4be36e8445fa6c58c290400109c, entries=150, sequenceid=402, filesize=12.0 K 2024-12-08T11:22:08,041 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:08,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 271 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656988036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:08,041 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:08,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656988037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:08,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/1836a6b7274045be8ad2e58acd733807 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1836a6b7274045be8ad2e58acd733807 2024-12-08T11:22:08,045 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1836a6b7274045be8ad2e58acd733807, entries=150, sequenceid=402, filesize=12.0 K 2024-12-08T11:22:08,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/08a9de36af084b04a8c7471073f2078c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/08a9de36af084b04a8c7471073f2078c 2024-12-08T11:22:08,050 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/08a9de36af084b04a8c7471073f2078c, entries=150, sequenceid=402, filesize=12.0 K 2024-12-08T11:22:08,051 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f908e109f52a6b3eb513553b88d88e13 in 857ms, sequenceid=402, compaction requested=true 2024-12-08T11:22:08,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:08,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): 
Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:08,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-12-08T11:22:08,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-12-08T11:22:08,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-08T11:22:08,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1630 sec 2024-12-08T11:22:08,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.1670 sec 2024-12-08T11:22:08,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:08,545 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:22:08,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:08,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:08,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:08,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:08,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:08,545 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:08,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/f28a3e4cfc774d7a9780dbc225e26587 is 50, key is test_row_0/A:col10/1733656928543/Put/seqid=0 2024-12-08T11:22:08,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742396_1572 (size=14741) 2024-12-08T11:22:08,554 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/f28a3e4cfc774d7a9780dbc225e26587 2024-12-08T11:22:08,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/c138ea1c5150439a9c7d758f2e65b64a is 50, key is test_row_0/B:col10/1733656928543/Put/seqid=0 2024-12-08T11:22:08,564 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742397_1573 (size=12301) 2024-12-08T11:22:08,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:08,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656988570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:08,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:08,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656988572, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:08,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:08,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656988679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:08,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:08,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656988681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:08,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:08,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656988880, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:08,890 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:08,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656988887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:08,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/c138ea1c5150439a9c7d758f2e65b64a 2024-12-08T11:22:08,971 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/884599723f6d4c0d860c9e83020c085f is 50, key is test_row_0/C:col10/1733656928543/Put/seqid=0 2024-12-08T11:22:08,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742398_1574 (size=12301) 2024-12-08T11:22:08,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-08T11:22:08,993 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-12-08T11:22:08,994 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:08,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees 2024-12-08T11:22:08,995 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:08,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T11:22:08,996 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=150, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:08,996 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:09,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T11:22:09,148 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T11:22:09,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:09,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:09,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:09,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:09,190 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656989187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,196 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 284 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656989193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T11:22:09,301 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T11:22:09,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:09,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:09,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:09,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:09,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=427 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/884599723f6d4c0d860c9e83020c085f 2024-12-08T11:22:09,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/f28a3e4cfc774d7a9780dbc225e26587 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f28a3e4cfc774d7a9780dbc225e26587 2024-12-08T11:22:09,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f28a3e4cfc774d7a9780dbc225e26587, entries=200, sequenceid=427, filesize=14.4 K 2024-12-08T11:22:09,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/c138ea1c5150439a9c7d758f2e65b64a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c138ea1c5150439a9c7d758f2e65b64a 2024-12-08T11:22:09,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c138ea1c5150439a9c7d758f2e65b64a, entries=150, sequenceid=427, filesize=12.0 K 2024-12-08T11:22:09,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/884599723f6d4c0d860c9e83020c085f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/884599723f6d4c0d860c9e83020c085f 2024-12-08T11:22:09,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/884599723f6d4c0d860c9e83020c085f, entries=150, sequenceid=427, filesize=12.0 K 2024-12-08T11:22:09,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f908e109f52a6b3eb513553b88d88e13 in 846ms, sequenceid=427, compaction requested=true 2024-12-08T11:22:09,391 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:09,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:09,391 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:09,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:09,391 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:09,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:09,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:09,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:09,391 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:09,392 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54664 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:09,392 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49784 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:09,392 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/A is initiating minor compaction (all files) 2024-12-08T11:22:09,392 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:22:09,392 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,392 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:09,392 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/828f6dc57fed4a5e978aab5ef455480b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e6b3e8ee31d247458dc7026095d8c427, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/1262a4be36e8445fa6c58c290400109c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f28a3e4cfc774d7a9780dbc225e26587] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=53.4 K 2024-12-08T11:22:09,392 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b57a2b58278b4d1689b30b3a8591af23, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e3e3bbdc049944fcae7be3655958c7be, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1836a6b7274045be8ad2e58acd733807, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c138ea1c5150439a9c7d758f2e65b64a] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=48.6 K 2024-12-08T11:22:09,393 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 828f6dc57fed4a5e978aab5ef455480b, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733656925813 2024-12-08T11:22:09,393 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b57a2b58278b4d1689b30b3a8591af23, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733656925813 2024-12-08T11:22:09,393 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6b3e8ee31d247458dc7026095d8c427, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733656926188 2024-12-08T11:22:09,393 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting e3e3bbdc049944fcae7be3655958c7be, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733656926188 2024-12-08T11:22:09,393 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1262a4be36e8445fa6c58c290400109c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733656926219 2024-12-08T11:22:09,393 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 
1836a6b7274045be8ad2e58acd733807, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733656926219 2024-12-08T11:22:09,393 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c138ea1c5150439a9c7d758f2e65b64a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733656927394 2024-12-08T11:22:09,394 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting f28a3e4cfc774d7a9780dbc225e26587, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733656927394 2024-12-08T11:22:09,400 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#494 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:09,401 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/de0fc0140ebe4d76b66db99d4dd180bb is 50, key is test_row_0/B:col10/1733656928543/Put/seqid=0 2024-12-08T11:22:09,402 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#495 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:09,403 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/cc66b2e1590f4586aa98185548f00d9c is 50, key is test_row_0/A:col10/1733656928543/Put/seqid=0 2024-12-08T11:22:09,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742400_1576 (size=13017) 2024-12-08T11:22:09,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742399_1575 (size=13017) 2024-12-08T11:22:09,414 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/cc66b2e1590f4586aa98185548f00d9c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cc66b2e1590f4586aa98185548f00d9c 2024-12-08T11:22:09,414 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/de0fc0140ebe4d76b66db99d4dd180bb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de0fc0140ebe4d76b66db99d4dd180bb 2024-12-08T11:22:09,418 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into cc66b2e1590f4586aa98185548f00d9c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:09,418 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:09,418 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=12, startTime=1733656929391; duration=0sec 2024-12-08T11:22:09,418 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:09,418 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:22:09,418 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:09,419 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into de0fc0140ebe4d76b66db99d4dd180bb(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:09,419 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:09,419 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=12, startTime=1733656929391; duration=0sec 2024-12-08T11:22:09,419 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:09,419 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:22:09,420 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49784 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:09,420 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:22:09,420 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:09,420 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4b92b7aa76643bc85ff015cad3e64dc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3c656caeef16470098f28417d0acf82b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/08a9de36af084b04a8c7471073f2078c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/884599723f6d4c0d860c9e83020c085f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=48.6 K 2024-12-08T11:22:09,421 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4b92b7aa76643bc85ff015cad3e64dc, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=363, earliestPutTs=1733656925813 2024-12-08T11:22:09,422 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c656caeef16470098f28417d0acf82b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=387, earliestPutTs=1733656926188 2024-12-08T11:22:09,422 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08a9de36af084b04a8c7471073f2078c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=402, earliestPutTs=1733656926219 2024-12-08T11:22:09,422 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 884599723f6d4c0d860c9e83020c085f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733656927394 2024-12-08T11:22:09,435 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#C#compaction#496 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:09,435 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/3ef06deb315a4ba7b5723175339e2ecd is 50, key is test_row_0/C:col10/1733656928543/Put/seqid=0 2024-12-08T11:22:09,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742401_1577 (size=13017) 2024-12-08T11:22:09,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:09,449 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-08T11:22:09,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:09,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:09,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:09,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:09,449 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:09,450 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:09,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/27dfa16e2a3445379daa0f3f22c5daf4 is 50, key is test_row_0/A:col10/1733656928560/Put/seqid=0 2024-12-08T11:22:09,454 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T11:22:09,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:09,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:09,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:09,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:09,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742402_1578 (size=14741) 2024-12-08T11:22:09,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/27dfa16e2a3445379daa0f3f22c5daf4 2024-12-08T11:22:09,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:09,463 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/cb9745776963465b86d4a423df819cba is 50, key is test_row_0/B:col10/1733656928560/Put/seqid=0 2024-12-08T11:22:09,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742403_1579 (size=12301) 2024-12-08T11:22:09,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656989546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656989547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656989547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T11:22:09,607 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T11:22:09,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:09,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,608 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:09,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:09,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:09,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656989653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656989657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656989657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656989695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,700 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 286 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656989699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,760 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T11:22:09,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:09,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] handler.RSProcedureHandler(58): pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:09,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=151 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:09,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=151 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:09,844 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/3ef06deb315a4ba7b5723175339e2ecd as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3ef06deb315a4ba7b5723175339e2ecd 2024-12-08T11:22:09,849 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into 3ef06deb315a4ba7b5723175339e2ecd(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:09,849 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:09,849 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=12, startTime=1733656929391; duration=0sec 2024-12-08T11:22:09,850 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:09,850 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:22:09,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656989860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656989861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:09,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656989861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/cb9745776963465b86d4a423df819cba 2024-12-08T11:22:09,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/d2bac5f025664e4ca88287fe6ca45c46 is 50, key is test_row_0/C:col10/1733656928560/Put/seqid=0 2024-12-08T11:22:09,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742404_1580 (size=12301) 2024-12-08T11:22:09,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/d2bac5f025664e4ca88287fe6ca45c46 2024-12-08T11:22:09,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/27dfa16e2a3445379daa0f3f22c5daf4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/27dfa16e2a3445379daa0f3f22c5daf4 2024-12-08T11:22:09,895 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/27dfa16e2a3445379daa0f3f22c5daf4, entries=200, sequenceid=441, filesize=14.4 K 2024-12-08T11:22:09,895 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/cb9745776963465b86d4a423df819cba as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/cb9745776963465b86d4a423df819cba 2024-12-08T11:22:09,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/cb9745776963465b86d4a423df819cba, entries=150, sequenceid=441, filesize=12.0 K 2024-12-08T11:22:09,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/d2bac5f025664e4ca88287fe6ca45c46 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d2bac5f025664e4ca88287fe6ca45c46 2024-12-08T11:22:09,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d2bac5f025664e4ca88287fe6ca45c46, entries=150, sequenceid=441, filesize=12.0 K 2024-12-08T11:22:09,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for f908e109f52a6b3eb513553b88d88e13 in 458ms, sequenceid=441, compaction requested=false 2024-12-08T11:22:09,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:09,912 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:09,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=151 2024-12-08T11:22:09,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:09,913 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-08T11:22:09,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:09,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:09,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:09,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:09,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:09,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:09,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/2f9d0d119b3d4855ab2e1dad8b522b5a is 50, key is test_row_0/A:col10/1733656929546/Put/seqid=0 2024-12-08T11:22:09,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742405_1581 (size=12301) 2024-12-08T11:22:09,930 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/2f9d0d119b3d4855ab2e1dad8b522b5a 2024-12-08T11:22:09,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/e9447dd11cb24dd199e08cf4b2896a23 is 50, key is test_row_0/B:col10/1733656929546/Put/seqid=0 2024-12-08T11:22:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742406_1582 (size=12301) 2024-12-08T11:22:09,943 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=467 (bloomFilter=true), 
to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/e9447dd11cb24dd199e08cf4b2896a23 2024-12-08T11:22:09,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/ebc541f82def45b8b36b64afbefdb27b is 50, key is test_row_0/C:col10/1733656929546/Put/seqid=0 2024-12-08T11:22:09,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742407_1583 (size=12301) 2024-12-08T11:22:09,958 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=467 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/ebc541f82def45b8b36b64afbefdb27b 2024-12-08T11:22:09,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/2f9d0d119b3d4855ab2e1dad8b522b5a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2f9d0d119b3d4855ab2e1dad8b522b5a 2024-12-08T11:22:09,968 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2f9d0d119b3d4855ab2e1dad8b522b5a, entries=150, sequenceid=467, filesize=12.0 K 2024-12-08T11:22:09,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/e9447dd11cb24dd199e08cf4b2896a23 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e9447dd11cb24dd199e08cf4b2896a23 2024-12-08T11:22:09,974 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e9447dd11cb24dd199e08cf4b2896a23, entries=150, sequenceid=467, filesize=12.0 K 2024-12-08T11:22:09,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/ebc541f82def45b8b36b64afbefdb27b as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/ebc541f82def45b8b36b64afbefdb27b 2024-12-08T11:22:09,978 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/ebc541f82def45b8b36b64afbefdb27b, entries=150, sequenceid=467, filesize=12.0 K 2024-12-08T11:22:09,979 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for f908e109f52a6b3eb513553b88d88e13 in 66ms, sequenceid=467, compaction requested=true 2024-12-08T11:22:09,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:09,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:09,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=151}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=151 2024-12-08T11:22:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=151 2024-12-08T11:22:09,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-08T11:22:09,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 984 msec 2024-12-08T11:22:09,984 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=150, table=TestAcidGuarantees in 989 msec 2024-12-08T11:22:10,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-12-08T11:22:10,098 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-12-08T11:22:10,100 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:10,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees 2024-12-08T11:22:10,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T11:22:10,101 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:10,102 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=152, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:10,102 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:10,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:10,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:22:10,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:10,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:10,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:10,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:10,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:10,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:10,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/ada1fdc24491410d9997826470294c4f is 50, key is test_row_0/A:col10/1733656930166/Put/seqid=0 2024-12-08T11:22:10,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742408_1584 (size=17181) 2024-12-08T11:22:10,185 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/ada1fdc24491410d9997826470294c4f 2024-12-08T11:22:10,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/034bf338ef8d4327ab4b5c2212d500c9 is 50, key is test_row_0/B:col10/1733656930166/Put/seqid=0 2024-12-08T11:22:10,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742409_1585 (size=12301) 2024-12-08T11:22:10,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T11:22:10,253 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,253 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing 
remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:10,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:10,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,254 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:10,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656990269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656990270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656990270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656990376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656990376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656990379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T11:22:10,406 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,406 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:10,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:10,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,406 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:10,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,558 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656990581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656990581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656990583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,594 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/034bf338ef8d4327ab4b5c2212d500c9 2024-12-08T11:22:10,600 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/2c61de10c35c49309a78b42ffe75e77a is 50, key is test_row_0/C:col10/1733656930166/Put/seqid=0 2024-12-08T11:22:10,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742410_1586 (size=12301) 2024-12-08T11:22:10,612 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/2c61de10c35c49309a78b42ffe75e77a 2024-12-08T11:22:10,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/ada1fdc24491410d9997826470294c4f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/ada1fdc24491410d9997826470294c4f 2024-12-08T11:22:10,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/ada1fdc24491410d9997826470294c4f, entries=250, sequenceid=478, filesize=16.8 K 2024-12-08T11:22:10,620 DEBUG [MemStoreFlusher.0 
{}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/034bf338ef8d4327ab4b5c2212d500c9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/034bf338ef8d4327ab4b5c2212d500c9 2024-12-08T11:22:10,623 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/034bf338ef8d4327ab4b5c2212d500c9, entries=150, sequenceid=478, filesize=12.0 K 2024-12-08T11:22:10,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/2c61de10c35c49309a78b42ffe75e77a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2c61de10c35c49309a78b42ffe75e77a 2024-12-08T11:22:10,628 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2c61de10c35c49309a78b42ffe75e77a, entries=150, sequenceid=478, filesize=12.0 K 2024-12-08T11:22:10,629 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f908e109f52a6b3eb513553b88d88e13 in 452ms, sequenceid=478, compaction requested=true 2024-12-08T11:22:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:10,629 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:10,629 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f908e109f52a6b3eb513553b88d88e13:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:10,629 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: 
system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:10,630 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:10,630 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/B is initiating minor compaction (all files) 2024-12-08T11:22:10,630 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/B in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,630 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de0fc0140ebe4d76b66db99d4dd180bb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/cb9745776963465b86d4a423df819cba, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e9447dd11cb24dd199e08cf4b2896a23, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/034bf338ef8d4327ab4b5c2212d500c9] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=48.8 K 2024-12-08T11:22:10,631 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57240 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:10,631 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/A is initiating minor compaction (all files) 2024-12-08T11:22:10,631 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/A in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:10,631 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cc66b2e1590f4586aa98185548f00d9c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/27dfa16e2a3445379daa0f3f22c5daf4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2f9d0d119b3d4855ab2e1dad8b522b5a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/ada1fdc24491410d9997826470294c4f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=55.9 K 2024-12-08T11:22:10,631 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc66b2e1590f4586aa98185548f00d9c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733656927394 2024-12-08T11:22:10,631 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting de0fc0140ebe4d76b66db99d4dd180bb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733656927394 2024-12-08T11:22:10,631 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27dfa16e2a3445379daa0f3f22c5daf4, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733656928560 2024-12-08T11:22:10,631 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting cb9745776963465b86d4a423df819cba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733656928560 2024-12-08T11:22:10,632 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f9d0d119b3d4855ab2e1dad8b522b5a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733656929545 2024-12-08T11:22:10,632 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting e9447dd11cb24dd199e08cf4b2896a23, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733656929545 2024-12-08T11:22:10,632 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting ada1fdc24491410d9997826470294c4f, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=478, earliestPutTs=1733656930166 2024-12-08T11:22:10,632 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 034bf338ef8d4327ab4b5c2212d500c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=478, earliestPutTs=1733656930166 2024-12-08T11:22:10,639 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#B#compaction#506 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:10,640 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/a067089466eb4ca3a0c7fe32291cb648 is 50, key is test_row_0/B:col10/1733656930166/Put/seqid=0 2024-12-08T11:22:10,640 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#A#compaction#507 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:10,641 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/418086697d794c89b5d06834ffe006f0 is 50, key is test_row_0/A:col10/1733656930166/Put/seqid=0 2024-12-08T11:22:10,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742411_1587 (size=13153) 2024-12-08T11:22:10,653 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/a067089466eb4ca3a0c7fe32291cb648 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/a067089466eb4ca3a0c7fe32291cb648 2024-12-08T11:22:10,658 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/B of f908e109f52a6b3eb513553b88d88e13 into a067089466eb4ca3a0c7fe32291cb648(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:10,658 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:10,658 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/B, priority=12, startTime=1733656930629; duration=0sec 2024-12-08T11:22:10,658 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:10,658 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:B 2024-12-08T11:22:10,658 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:10,659 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:10,659 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): f908e109f52a6b3eb513553b88d88e13/C is initiating minor compaction (all files) 2024-12-08T11:22:10,659 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f908e109f52a6b3eb513553b88d88e13/C in TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,659 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3ef06deb315a4ba7b5723175339e2ecd, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d2bac5f025664e4ca88287fe6ca45c46, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/ebc541f82def45b8b36b64afbefdb27b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2c61de10c35c49309a78b42ffe75e77a] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp, totalSize=48.8 K 2024-12-08T11:22:10,660 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ef06deb315a4ba7b5723175339e2ecd, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=427, earliestPutTs=1733656927394 2024-12-08T11:22:10,660 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting d2bac5f025664e4ca88287fe6ca45c46, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1733656928560 2024-12-08T11:22:10,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742412_1588 (size=13153) 2024-12-08T11:22:10,660 DEBUG 
[RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ebc541f82def45b8b36b64afbefdb27b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=467, earliestPutTs=1733656929545 2024-12-08T11:22:10,661 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2c61de10c35c49309a78b42ffe75e77a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=478, earliestPutTs=1733656930166 2024-12-08T11:22:10,668 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f908e109f52a6b3eb513553b88d88e13#C#compaction#508 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:10,668 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a1e81087380d433ea4072372aa3d061d is 50, key is test_row_0/C:col10/1733656930166/Put/seqid=0 2024-12-08T11:22:10,673 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/418086697d794c89b5d06834ffe006f0 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/418086697d794c89b5d06834ffe006f0 2024-12-08T11:22:10,677 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/A of f908e109f52a6b3eb513553b88d88e13 into 418086697d794c89b5d06834ffe006f0(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:10,677 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:10,677 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/A, priority=12, startTime=1733656930629; duration=0sec 2024-12-08T11:22:10,677 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:10,677 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:A 2024-12-08T11:22:10,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742413_1589 (size=13153) 2024-12-08T11:22:10,691 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/a1e81087380d433ea4072372aa3d061d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a1e81087380d433ea4072372aa3d061d 2024-12-08T11:22:10,698 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f908e109f52a6b3eb513553b88d88e13/C of f908e109f52a6b3eb513553b88d88e13 into a1e81087380d433ea4072372aa3d061d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:10,698 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:10,698 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13., storeName=f908e109f52a6b3eb513553b88d88e13/C, priority=12, startTime=1733656930629; duration=0sec 2024-12-08T11:22:10,698 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:10,698 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f908e109f52a6b3eb513553b88d88e13:C 2024-12-08T11:22:10,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T11:22:10,711 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:10,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T11:22:10,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:10,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:10,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:10,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:10,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:10,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:10,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:10,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:10,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:10,712 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/2d14773c69f8442e84ae0d60b888c643 is 50, key is test_row_0/A:col10/1733656930262/Put/seqid=0 2024-12-08T11:22:10,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742414_1590 (size=17181) 2024-12-08T11:22:10,733 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656990727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656990728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656990835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656990835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,864 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:10,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:10,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:10,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:10,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:10,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656990889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656990889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:10,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:10,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656990890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,017 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,017 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:11,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:11,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:11,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:11,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:11,025 DEBUG [Thread-2157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:63801 2024-12-08T11:22:11,025 DEBUG [Thread-2153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:63801 2024-12-08T11:22:11,025 DEBUG [Thread-2153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:11,025 DEBUG [Thread-2157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:11,026 DEBUG [Thread-2155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:63801 2024-12-08T11:22:11,026 DEBUG [Thread-2155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:11,028 DEBUG [Thread-2159 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:63801 2024-12-08T11:22:11,028 DEBUG [Thread-2159 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:11,029 DEBUG [Thread-2151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ed69825 to 127.0.0.1:63801 2024-12-08T11:22:11,029 DEBUG [Thread-2151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:11,038 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:11,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656991038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:11,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 296 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656991039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/2d14773c69f8442e84ae0d60b888c643 2024-12-08T11:22:11,136 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/ae1675c4581b4abcb44d980f5a8cf793 is 50, key is test_row_0/B:col10/1733656930262/Put/seqid=0 2024-12-08T11:22:11,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742415_1591 (size=12301) 2024-12-08T11:22:11,169 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,169 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:11,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:11,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
2024-12-08T11:22:11,169 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T11:22:11,321 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:11,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:11,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656991341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 303 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656991341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38624 deadline: 1733656991391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38676 deadline: 1733656991394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38608 deadline: 1733656991397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,473 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:11,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:11,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,474 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/ae1675c4581b4abcb44d980f5a8cf793 2024-12-08T11:22:11,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/5c32a23fe087446d9412fff24fae1d1d is 50, key is test_row_0/C:col10/1733656930262/Put/seqid=0 2024-12-08T11:22:11,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742416_1592 (size=12301) 2024-12-08T11:22:11,626 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:11,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
as already flushing 2024-12-08T11:22:11,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,778 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,779 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:11,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:11,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,779 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:11,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 300 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38588 deadline: 1733656991844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,847 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:11,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 305 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1733656991846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,931 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:11,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=153 2024-12-08T11:22:11,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
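[editor's illustrative sketch, not part of the captured log] The RegionTooBusyException entries above show the region rejecting mutations once the memstore passes its blocking limit (512.0 K in this test) while a flush is still in flight. The following is a minimal, hypothetical Java sketch of how an application writer might back off and retry if that busy condition surfaces; the table, row, and value names are illustrative only, and in practice the HBase client's own retry policy usually handles this before the caller sees it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Illustrative row/column; the test writes rows like test_row_0 into families A/B/C.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);          // may be rejected while the region is over its memstore limit
                    break;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs); // give the in-flight flush time to free memstore space
                    backoffMs *= 2;
                }
            }
        }
    }
}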
2024-12-08T11:22:11,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. as already flushing 2024-12-08T11:22:11,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:11,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] handler.RSProcedureHandler(58): pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:11,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=153 java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:11,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=153 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
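[editor's illustrative sketch, not part of the captured log] The repeated "Unable to complete flush ... as already flushing" failures above are the master re-dispatching FlushRegionProcedure pid=153 (child of FlushTableProcedure pid=152) until the region server's in-progress flush finishes. From the client side the whole exchange is a single synchronous flush request; a minimal sketch, assuming default configuration and the same table name:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits the flush procedure on the master and waits for it to finish,
            // while the region server may reject re-dispatched attempts until the
            // currently running flush completes (as seen in the log above).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}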
2024-12-08T11:22:11,949 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=508 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/5c32a23fe087446d9412fff24fae1d1d 2024-12-08T11:22:11,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/2d14773c69f8442e84ae0d60b888c643 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d14773c69f8442e84ae0d60b888c643 2024-12-08T11:22:11,955 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d14773c69f8442e84ae0d60b888c643, entries=250, sequenceid=508, filesize=16.8 K 2024-12-08T11:22:11,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/ae1675c4581b4abcb44d980f5a8cf793 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ae1675c4581b4abcb44d980f5a8cf793 2024-12-08T11:22:11,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ae1675c4581b4abcb44d980f5a8cf793, entries=150, sequenceid=508, filesize=12.0 K 2024-12-08T11:22:11,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/5c32a23fe087446d9412fff24fae1d1d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/5c32a23fe087446d9412fff24fae1d1d 2024-12-08T11:22:11,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/5c32a23fe087446d9412fff24fae1d1d, entries=150, sequenceid=508, filesize=12.0 K 2024-12-08T11:22:11,961 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for f908e109f52a6b3eb513553b88d88e13 in 1250ms, sequenceid=508, compaction requested=false 2024-12-08T11:22:11,961 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:12,083 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:12,084 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=153 2024-12-08T11:22:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:12,084 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-12-08T11:22:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:12,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:12,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/0419f477d8da41e783abb0cdb910e891 is 50, key is test_row_0/A:col10/1733656930722/Put/seqid=0 2024-12-08T11:22:12,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742417_1593 (size=9857) 2024-12-08T11:22:12,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T11:22:12,398 DEBUG [Thread-2144 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:63801 2024-12-08T11:22:12,398 DEBUG [Thread-2144 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:12,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:12,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
as already flushing 2024-12-08T11:22:12,403 DEBUG [Thread-2148 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:63801 2024-12-08T11:22:12,403 DEBUG [Thread-2148 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:12,405 DEBUG [Thread-2146 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:63801 2024-12-08T11:22:12,405 DEBUG [Thread-2146 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:12,492 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/0419f477d8da41e783abb0cdb910e891 2024-12-08T11:22:12,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/eab760e39fc54c67b1ec162a98722cb5 is 50, key is test_row_0/B:col10/1733656930722/Put/seqid=0 2024-12-08T11:22:12,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742418_1594 (size=9857) 2024-12-08T11:22:12,853 DEBUG [Thread-2142 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:63801 2024-12-08T11:22:12,853 DEBUG [Thread-2142 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:12,857 DEBUG [Thread-2140 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x022a6e9f to 127.0.0.1:63801 2024-12-08T11:22:12,858 DEBUG [Thread-2140 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:12,902 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/eab760e39fc54c67b1ec162a98722cb5 2024-12-08T11:22:12,908 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/fc5cfd2a528f4a04be3f8a58a1c56e4f is 50, key is test_row_0/C:col10/1733656930722/Put/seqid=0 2024-12-08T11:22:12,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742419_1595 (size=9857) 2024-12-08T11:22:13,312 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=517 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/fc5cfd2a528f4a04be3f8a58a1c56e4f 2024-12-08T11:22:13,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/0419f477d8da41e783abb0cdb910e891 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/0419f477d8da41e783abb0cdb910e891 2024-12-08T11:22:13,318 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/0419f477d8da41e783abb0cdb910e891, entries=100, sequenceid=517, filesize=9.6 K 2024-12-08T11:22:13,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/eab760e39fc54c67b1ec162a98722cb5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/eab760e39fc54c67b1ec162a98722cb5 2024-12-08T11:22:13,321 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/eab760e39fc54c67b1ec162a98722cb5, entries=100, sequenceid=517, filesize=9.6 K 2024-12-08T11:22:13,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/fc5cfd2a528f4a04be3f8a58a1c56e4f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/fc5cfd2a528f4a04be3f8a58a1c56e4f 2024-12-08T11:22:13,324 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/fc5cfd2a528f4a04be3f8a58a1c56e4f, entries=100, sequenceid=517, filesize=9.6 K 2024-12-08T11:22:13,325 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=33.54 KB/34350 for f908e109f52a6b3eb513553b88d88e13 in 1240ms, sequenceid=517, compaction requested=true 2024-12-08T11:22:13,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:13,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 
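[editor's illustrative sketch, not part of the captured log] The entries that follow show the flush procedures (pid=153, then pid=152) finishing, the AcidGuaranteesTestTool printing its writer/scanner totals, and the client starting a disable of TestAcidGuarantees (DisableTableProcedure pid=154), which unassigns and closes region f908e109f52a6b3eb513553b88d88e13. A hypothetical teardown sketch of that same sequence through the public Admin API; class and variable names are assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(table);              // flush outstanding memstore data first
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);   // master schedules close of the table's regions
            }
        }
    }
}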
2024-12-08T11:22:13,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-08T11:22:13,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-08T11:22:13,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-08T11:22:13,326 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2240 sec 2024-12-08T11:22:13,327 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=152, table=TestAcidGuarantees in 3.2270 sec 2024-12-08T11:22:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-08T11:22:14,206 INFO [Thread-2150 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 152 completed 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 166 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 161 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 25 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 28 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2369 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7107 rows 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2378 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7134 rows 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2372 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7116 rows 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2358 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7074 rows 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2369 2024-12-08T11:22:14,206 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7107 rows 2024-12-08T11:22:14,206 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T11:22:14,206 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x59bd764a to 127.0.0.1:63801 2024-12-08T11:22:14,206 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:14,208 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of 
TestAcidGuarantees 2024-12-08T11:22:14,208 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T11:22:14,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:14,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T11:22:14,211 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656934211"}]},"ts":"1733656934211"} 2024-12-08T11:22:14,213 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T11:22:14,216 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T11:22:14,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T11:22:14,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f908e109f52a6b3eb513553b88d88e13, UNASSIGN}] 2024-12-08T11:22:14,217 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f908e109f52a6b3eb513553b88d88e13, UNASSIGN 2024-12-08T11:22:14,218 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=f908e109f52a6b3eb513553b88d88e13, regionState=CLOSING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:22:14,219 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T11:22:14,219 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; CloseRegionProcedure f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:22:14,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T11:22:14,370 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:14,370 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(124): Close f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:14,370 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T11:22:14,370 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1681): Closing f908e109f52a6b3eb513553b88d88e13, disabling compactions & flushes 2024-12-08T11:22:14,370 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:14,370 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:14,370 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. after waiting 0 ms 2024-12-08T11:22:14,370 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:14,371 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(2837): Flushing f908e109f52a6b3eb513553b88d88e13 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-08T11:22:14,371 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=A 2024-12-08T11:22:14,371 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:14,371 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=B 2024-12-08T11:22:14,371 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:14,371 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f908e109f52a6b3eb513553b88d88e13, store=C 2024-12-08T11:22:14,371 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:14,374 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/35c7f884fe5946bcab6fd598b3386730 is 50, key is test_row_0/A:col10/1733656932404/Put/seqid=0 2024-12-08T11:22:14,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742420_1596 (size=12301) 2024-12-08T11:22:14,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T11:22:14,778 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=525 (bloomFilter=true), 
to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/35c7f884fe5946bcab6fd598b3386730 2024-12-08T11:22:14,783 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2b723dfe1095444bb640de1aa01086b9 is 50, key is test_row_0/B:col10/1733656932404/Put/seqid=0 2024-12-08T11:22:14,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742421_1597 (size=12301) 2024-12-08T11:22:14,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T11:22:15,186 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=525 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2b723dfe1095444bb640de1aa01086b9 2024-12-08T11:22:15,191 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4122446166ff476893808757b495def2 is 50, key is test_row_0/C:col10/1733656932404/Put/seqid=0 2024-12-08T11:22:15,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742422_1598 (size=12301) 2024-12-08T11:22:15,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T11:22:15,597 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=525 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4122446166ff476893808757b495def2 2024-12-08T11:22:15,600 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/A/35c7f884fe5946bcab6fd598b3386730 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/35c7f884fe5946bcab6fd598b3386730 2024-12-08T11:22:15,602 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/35c7f884fe5946bcab6fd598b3386730, entries=150, sequenceid=525, filesize=12.0 K 2024-12-08T11:22:15,603 DEBUG 
[RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/B/2b723dfe1095444bb640de1aa01086b9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2b723dfe1095444bb640de1aa01086b9 2024-12-08T11:22:15,605 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2b723dfe1095444bb640de1aa01086b9, entries=150, sequenceid=525, filesize=12.0 K 2024-12-08T11:22:15,606 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/.tmp/C/4122446166ff476893808757b495def2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4122446166ff476893808757b495def2 2024-12-08T11:22:15,608 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4122446166ff476893808757b495def2, entries=150, sequenceid=525, filesize=12.0 K 2024-12-08T11:22:15,609 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for f908e109f52a6b3eb513553b88d88e13 in 1239ms, sequenceid=525, compaction requested=true 2024-12-08T11:22:15,609 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d9e35e06e25842ef86c836360d49504a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cb93435b5ee640b498490412a6c01b3e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/aab5f2e56bb04d3aa4ee519ecf95adab, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/af80974ab0a6477d81ca8d685c3e397f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4ec3c3721d7d4129b701a08bc1a97ca7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/99f5c1d16e934660a0b43a3175c199da, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e804dff5afd24973afcd0473b515061f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e940da723ca24ef8bf277599626c8fd5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d6245f245db7496cbcaf7b226bbcb6e8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/780c8ef4ed754d9dbc060870fb093e96, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/52134c0332b04c64bb2778d0fa6d621c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/09773055b49847bfa8ce5d702eb2e318, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d60cf74b0a0418294fdf19404108640, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/53906e259aec4dfa85dd95d1997e2c06, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/da5b345c8f4f428e85f3ea4d56fb845b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/751f3af06495449d9193aef13bbd3fcf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/6da2d5e4f22f4258bd95a3a70bd60284, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/828f6dc57fed4a5e978aab5ef455480b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e6b3e8ee31d247458dc7026095d8c427, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/1262a4be36e8445fa6c58c290400109c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f28a3e4cfc774d7a9780dbc225e26587, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cc66b2e1590f4586aa98185548f00d9c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/27dfa16e2a3445379daa0f3f22c5daf4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2f9d0d119b3d4855ab2e1dad8b522b5a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/ada1fdc24491410d9997826470294c4f] to archive 2024-12-08T11:22:15,610 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:22:15,611 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d9e35e06e25842ef86c836360d49504a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d9e35e06e25842ef86c836360d49504a 2024-12-08T11:22:15,612 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cb93435b5ee640b498490412a6c01b3e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cb93435b5ee640b498490412a6c01b3e 2024-12-08T11:22:15,613 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/aab5f2e56bb04d3aa4ee519ecf95adab to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/aab5f2e56bb04d3aa4ee519ecf95adab 2024-12-08T11:22:15,614 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/af80974ab0a6477d81ca8d685c3e397f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/af80974ab0a6477d81ca8d685c3e397f 2024-12-08T11:22:15,616 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4ec3c3721d7d4129b701a08bc1a97ca7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/4ec3c3721d7d4129b701a08bc1a97ca7 2024-12-08T11:22:15,617 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/99f5c1d16e934660a0b43a3175c199da to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/99f5c1d16e934660a0b43a3175c199da 2024-12-08T11:22:15,618 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e804dff5afd24973afcd0473b515061f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e804dff5afd24973afcd0473b515061f 2024-12-08T11:22:15,619 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e940da723ca24ef8bf277599626c8fd5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e940da723ca24ef8bf277599626c8fd5 2024-12-08T11:22:15,620 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d6245f245db7496cbcaf7b226bbcb6e8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/d6245f245db7496cbcaf7b226bbcb6e8 2024-12-08T11:22:15,620 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/780c8ef4ed754d9dbc060870fb093e96 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/780c8ef4ed754d9dbc060870fb093e96 2024-12-08T11:22:15,621 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/52134c0332b04c64bb2778d0fa6d621c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/52134c0332b04c64bb2778d0fa6d621c 2024-12-08T11:22:15,622 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/09773055b49847bfa8ce5d702eb2e318 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/09773055b49847bfa8ce5d702eb2e318 2024-12-08T11:22:15,622 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d60cf74b0a0418294fdf19404108640 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d60cf74b0a0418294fdf19404108640 2024-12-08T11:22:15,623 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/53906e259aec4dfa85dd95d1997e2c06 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/53906e259aec4dfa85dd95d1997e2c06 2024-12-08T11:22:15,624 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/da5b345c8f4f428e85f3ea4d56fb845b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/da5b345c8f4f428e85f3ea4d56fb845b 2024-12-08T11:22:15,625 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/751f3af06495449d9193aef13bbd3fcf to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/751f3af06495449d9193aef13bbd3fcf 2024-12-08T11:22:15,626 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/6da2d5e4f22f4258bd95a3a70bd60284 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/6da2d5e4f22f4258bd95a3a70bd60284 2024-12-08T11:22:15,626 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/828f6dc57fed4a5e978aab5ef455480b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/828f6dc57fed4a5e978aab5ef455480b 2024-12-08T11:22:15,627 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e6b3e8ee31d247458dc7026095d8c427 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/e6b3e8ee31d247458dc7026095d8c427 2024-12-08T11:22:15,628 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/1262a4be36e8445fa6c58c290400109c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/1262a4be36e8445fa6c58c290400109c 2024-12-08T11:22:15,629 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f28a3e4cfc774d7a9780dbc225e26587 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/f28a3e4cfc774d7a9780dbc225e26587 2024-12-08T11:22:15,629 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cc66b2e1590f4586aa98185548f00d9c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/cc66b2e1590f4586aa98185548f00d9c 2024-12-08T11:22:15,630 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/27dfa16e2a3445379daa0f3f22c5daf4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/27dfa16e2a3445379daa0f3f22c5daf4 2024-12-08T11:22:15,631 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2f9d0d119b3d4855ab2e1dad8b522b5a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2f9d0d119b3d4855ab2e1dad8b522b5a 2024-12-08T11:22:15,632 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/ada1fdc24491410d9997826470294c4f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/ada1fdc24491410d9997826470294c4f 2024-12-08T11:22:15,633 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/db4221266bea412b96e3d3a09cb32b46, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2499288ecc0d45f69fedccdbf030136f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8c681efc56df4db790b74175e21e05e0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f18aecfd391e4a0e8868b4874e7af505, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/a1851ab42f4047da8891905ca28cfba4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ec38f3fbbee947cf90f96ca7fbe19ecc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b716bef6ed584049bfd989d6daf1edeb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2554754d16894c3f9717740abd8b4c56, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/fb5c2b062dbf44df8c54154c990a6868, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/847e168d1e7542cd8bcee808be55e80f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/5efda39330d341219bd524839b13e196, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/430f8a4f532a4f83971c4ce06321ccea, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de434c72b89a459dbce809b47d505df7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8094ae3ded064649823bbd2306ede7ec, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1cb8a20404ae41e1a9c0094847ea4bb1, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/770e5fcd98e9412ea4342bb79c5bfc52, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b57a2b58278b4d1689b30b3a8591af23, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b0382b324db141d097c969a92d38894b, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e3e3bbdc049944fcae7be3655958c7be, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1836a6b7274045be8ad2e58acd733807, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de0fc0140ebe4d76b66db99d4dd180bb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c138ea1c5150439a9c7d758f2e65b64a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/cb9745776963465b86d4a423df819cba, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e9447dd11cb24dd199e08cf4b2896a23, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/034bf338ef8d4327ab4b5c2212d500c9] to archive 2024-12-08T11:22:15,633 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:22:15,634 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/db4221266bea412b96e3d3a09cb32b46 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/db4221266bea412b96e3d3a09cb32b46 2024-12-08T11:22:15,635 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2499288ecc0d45f69fedccdbf030136f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2499288ecc0d45f69fedccdbf030136f 2024-12-08T11:22:15,636 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8c681efc56df4db790b74175e21e05e0 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8c681efc56df4db790b74175e21e05e0 2024-12-08T11:22:15,637 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f18aecfd391e4a0e8868b4874e7af505 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/f18aecfd391e4a0e8868b4874e7af505 2024-12-08T11:22:15,637 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/a1851ab42f4047da8891905ca28cfba4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/a1851ab42f4047da8891905ca28cfba4 2024-12-08T11:22:15,638 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ec38f3fbbee947cf90f96ca7fbe19ecc to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ec38f3fbbee947cf90f96ca7fbe19ecc 2024-12-08T11:22:15,639 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b716bef6ed584049bfd989d6daf1edeb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b716bef6ed584049bfd989d6daf1edeb 2024-12-08T11:22:15,640 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2554754d16894c3f9717740abd8b4c56 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2554754d16894c3f9717740abd8b4c56 2024-12-08T11:22:15,641 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/fb5c2b062dbf44df8c54154c990a6868 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/fb5c2b062dbf44df8c54154c990a6868 2024-12-08T11:22:15,641 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/847e168d1e7542cd8bcee808be55e80f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/847e168d1e7542cd8bcee808be55e80f 2024-12-08T11:22:15,642 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/5efda39330d341219bd524839b13e196 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/5efda39330d341219bd524839b13e196 2024-12-08T11:22:15,643 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/430f8a4f532a4f83971c4ce06321ccea to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/430f8a4f532a4f83971c4ce06321ccea 2024-12-08T11:22:15,644 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de434c72b89a459dbce809b47d505df7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de434c72b89a459dbce809b47d505df7 2024-12-08T11:22:15,645 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8094ae3ded064649823bbd2306ede7ec to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/8094ae3ded064649823bbd2306ede7ec 2024-12-08T11:22:15,646 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1cb8a20404ae41e1a9c0094847ea4bb1 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1cb8a20404ae41e1a9c0094847ea4bb1 2024-12-08T11:22:15,646 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/770e5fcd98e9412ea4342bb79c5bfc52 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/770e5fcd98e9412ea4342bb79c5bfc52 2024-12-08T11:22:15,647 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b57a2b58278b4d1689b30b3a8591af23 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b57a2b58278b4d1689b30b3a8591af23 2024-12-08T11:22:15,648 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b0382b324db141d097c969a92d38894b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/b0382b324db141d097c969a92d38894b 2024-12-08T11:22:15,649 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e3e3bbdc049944fcae7be3655958c7be to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e3e3bbdc049944fcae7be3655958c7be 2024-12-08T11:22:15,649 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1836a6b7274045be8ad2e58acd733807 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/1836a6b7274045be8ad2e58acd733807 2024-12-08T11:22:15,650 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de0fc0140ebe4d76b66db99d4dd180bb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/de0fc0140ebe4d76b66db99d4dd180bb 2024-12-08T11:22:15,651 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c138ea1c5150439a9c7d758f2e65b64a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/c138ea1c5150439a9c7d758f2e65b64a 2024-12-08T11:22:15,652 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/cb9745776963465b86d4a423df819cba to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/cb9745776963465b86d4a423df819cba 2024-12-08T11:22:15,653 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e9447dd11cb24dd199e08cf4b2896a23 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/e9447dd11cb24dd199e08cf4b2896a23 2024-12-08T11:22:15,653 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/034bf338ef8d4327ab4b5c2212d500c9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/034bf338ef8d4327ab4b5c2212d500c9 2024-12-08T11:22:15,654 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a3e276c2e340419c938c1d80b7350588, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/44acfde678744b6a86aa48d9559fa402, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4d3347620dc46eb94b4d694ed9b4d33, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/b756ebdea0394bdaa2b0f21f8a381c94, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/7de9919f84454856b18276baa3b136c7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2b697930d0794f9c9e8aa33b9d19d799, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4dae68602e7e408faccd4aafe10ca4af, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/52dca8cceb6643198a0333ab2f88ab32, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a1d52907892c475a994ba87ba3f746a0, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4cfb10264126460abff8b29343914286, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bcac9ca1d2574fdb9e79ae4f816b2361, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0b3addac640e442ab0b9c8a643b57b37, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/740bae1834584f9fa1eff6c23294a97c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/80f28c7d3024486c8eaca6c0b0e9a851, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0793560afad44d9da250b63d37bea9e3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/8a0fb136460c4e7cb3a49615db2d2656, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4b92b7aa76643bc85ff015cad3e64dc, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0a5f9456fb7244518434158e819382d3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3c656caeef16470098f28417d0acf82b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/08a9de36af084b04a8c7471073f2078c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3ef06deb315a4ba7b5723175339e2ecd, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/884599723f6d4c0d860c9e83020c085f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d2bac5f025664e4ca88287fe6ca45c46, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/ebc541f82def45b8b36b64afbefdb27b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2c61de10c35c49309a78b42ffe75e77a] to archive 2024-12-08T11:22:15,655 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-12-08T11:22:15,656 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a3e276c2e340419c938c1d80b7350588 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a3e276c2e340419c938c1d80b7350588 2024-12-08T11:22:15,657 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/44acfde678744b6a86aa48d9559fa402 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/44acfde678744b6a86aa48d9559fa402 2024-12-08T11:22:15,657 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4d3347620dc46eb94b4d694ed9b4d33 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4d3347620dc46eb94b4d694ed9b4d33 2024-12-08T11:22:15,658 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/b756ebdea0394bdaa2b0f21f8a381c94 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/b756ebdea0394bdaa2b0f21f8a381c94 2024-12-08T11:22:15,659 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/7de9919f84454856b18276baa3b136c7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/7de9919f84454856b18276baa3b136c7 2024-12-08T11:22:15,660 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2b697930d0794f9c9e8aa33b9d19d799 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2b697930d0794f9c9e8aa33b9d19d799 2024-12-08T11:22:15,660 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4dae68602e7e408faccd4aafe10ca4af to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4dae68602e7e408faccd4aafe10ca4af 2024-12-08T11:22:15,661 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/52dca8cceb6643198a0333ab2f88ab32 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/52dca8cceb6643198a0333ab2f88ab32 2024-12-08T11:22:15,662 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a1d52907892c475a994ba87ba3f746a0 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a1d52907892c475a994ba87ba3f746a0 2024-12-08T11:22:15,663 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4cfb10264126460abff8b29343914286 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4cfb10264126460abff8b29343914286 2024-12-08T11:22:15,664 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bcac9ca1d2574fdb9e79ae4f816b2361 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/bcac9ca1d2574fdb9e79ae4f816b2361 2024-12-08T11:22:15,664 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0b3addac640e442ab0b9c8a643b57b37 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0b3addac640e442ab0b9c8a643b57b37 2024-12-08T11:22:15,665 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/740bae1834584f9fa1eff6c23294a97c to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/740bae1834584f9fa1eff6c23294a97c 2024-12-08T11:22:15,666 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/80f28c7d3024486c8eaca6c0b0e9a851 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/80f28c7d3024486c8eaca6c0b0e9a851 2024-12-08T11:22:15,667 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0793560afad44d9da250b63d37bea9e3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0793560afad44d9da250b63d37bea9e3 2024-12-08T11:22:15,667 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/8a0fb136460c4e7cb3a49615db2d2656 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/8a0fb136460c4e7cb3a49615db2d2656 2024-12-08T11:22:15,668 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4b92b7aa76643bc85ff015cad3e64dc to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/f4b92b7aa76643bc85ff015cad3e64dc 2024-12-08T11:22:15,669 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0a5f9456fb7244518434158e819382d3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/0a5f9456fb7244518434158e819382d3 2024-12-08T11:22:15,670 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3c656caeef16470098f28417d0acf82b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3c656caeef16470098f28417d0acf82b 2024-12-08T11:22:15,670 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/08a9de36af084b04a8c7471073f2078c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/08a9de36af084b04a8c7471073f2078c 2024-12-08T11:22:15,671 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3ef06deb315a4ba7b5723175339e2ecd to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/3ef06deb315a4ba7b5723175339e2ecd 2024-12-08T11:22:15,672 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/884599723f6d4c0d860c9e83020c085f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/884599723f6d4c0d860c9e83020c085f 2024-12-08T11:22:15,673 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d2bac5f025664e4ca88287fe6ca45c46 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/d2bac5f025664e4ca88287fe6ca45c46 2024-12-08T11:22:15,673 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/ebc541f82def45b8b36b64afbefdb27b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/ebc541f82def45b8b36b64afbefdb27b 2024-12-08T11:22:15,674 DEBUG [StoreCloser-TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2c61de10c35c49309a78b42ffe75e77a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/2c61de10c35c49309a78b42ffe75e77a 2024-12-08T11:22:15,677 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/recovered.edits/528.seqid, newMaxSeqId=528, maxSeqId=1 2024-12-08T11:22:15,678 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13. 2024-12-08T11:22:15,678 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1635): Region close journal for f908e109f52a6b3eb513553b88d88e13: 2024-12-08T11:22:15,679 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(170): Closed f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:15,679 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=f908e109f52a6b3eb513553b88d88e13, regionState=CLOSED 2024-12-08T11:22:15,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-08T11:22:15,681 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; CloseRegionProcedure f908e109f52a6b3eb513553b88d88e13, server=355ef6e50110,46083,1733656795491 in 1.4610 sec 2024-12-08T11:22:15,682 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-08T11:22:15,682 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f908e109f52a6b3eb513553b88d88e13, UNASSIGN in 1.4640 sec 2024-12-08T11:22:15,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-12-08T11:22:15,683 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4660 sec 2024-12-08T11:22:15,684 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656935684"}]},"ts":"1733656935684"} 2024-12-08T11:22:15,685 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T11:22:15,687 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T11:22:15,687 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4790 sec 2024-12-08T11:22:16,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-12-08T11:22:16,315 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-12-08T11:22:16,315 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T11:22:16,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:16,316 DEBUG [PEWorker-5 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=158, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:16,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-08T11:22:16,317 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=158, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:16,319 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:16,320 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/recovered.edits] 2024-12-08T11:22:16,322 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/0419f477d8da41e783abb0cdb910e891 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/0419f477d8da41e783abb0cdb910e891 2024-12-08T11:22:16,323 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d14773c69f8442e84ae0d60b888c643 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/2d14773c69f8442e84ae0d60b888c643 2024-12-08T11:22:16,324 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/35c7f884fe5946bcab6fd598b3386730 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/35c7f884fe5946bcab6fd598b3386730 2024-12-08T11:22:16,324 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/418086697d794c89b5d06834ffe006f0 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/A/418086697d794c89b5d06834ffe006f0 2024-12-08T11:22:16,326 DEBUG [HFileArchiver-5 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2b723dfe1095444bb640de1aa01086b9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/2b723dfe1095444bb640de1aa01086b9 2024-12-08T11:22:16,327 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/a067089466eb4ca3a0c7fe32291cb648 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/a067089466eb4ca3a0c7fe32291cb648 2024-12-08T11:22:16,327 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ae1675c4581b4abcb44d980f5a8cf793 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/ae1675c4581b4abcb44d980f5a8cf793 2024-12-08T11:22:16,328 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/eab760e39fc54c67b1ec162a98722cb5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/B/eab760e39fc54c67b1ec162a98722cb5 2024-12-08T11:22:16,330 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4122446166ff476893808757b495def2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/4122446166ff476893808757b495def2 2024-12-08T11:22:16,331 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/5c32a23fe087446d9412fff24fae1d1d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/5c32a23fe087446d9412fff24fae1d1d 2024-12-08T11:22:16,332 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a1e81087380d433ea4072372aa3d061d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/a1e81087380d433ea4072372aa3d061d 2024-12-08T11:22:16,333 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/fc5cfd2a528f4a04be3f8a58a1c56e4f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/C/fc5cfd2a528f4a04be3f8a58a1c56e4f 2024-12-08T11:22:16,335 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/recovered.edits/528.seqid to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13/recovered.edits/528.seqid 2024-12-08T11:22:16,336 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/f908e109f52a6b3eb513553b88d88e13 2024-12-08T11:22:16,336 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T11:22:16,337 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=158, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:16,339 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T11:22:16,340 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-08T11:22:16,341 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=158, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:16,341 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T11:22:16,341 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733656936341"}]},"ts":"9223372036854775807"} 2024-12-08T11:22:16,343 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T11:22:16,344 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f908e109f52a6b3eb513553b88d88e13, NAME => 'TestAcidGuarantees,,1733656908858.f908e109f52a6b3eb513553b88d88e13.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T11:22:16,344 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-08T11:22:16,344 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733656936344"}]},"ts":"9223372036854775807"} 2024-12-08T11:22:16,345 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T11:22:16,347 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=158, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:16,347 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 32 msec 2024-12-08T11:22:16,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-12-08T11:22:16,417 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 158 completed 2024-12-08T11:22:16,427 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=241 (was 242), OpenFileDescriptor=455 (was 456), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=501 (was 483) - SystemLoadAverage LEAK? -, ProcessCount=9 (was 11), AvailableMemoryMB=7472 (was 6477) - AvailableMemoryMB LEAK? - 2024-12-08T11:22:16,436 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=241, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=501, ProcessCount=9, AvailableMemoryMB=7471 2024-12-08T11:22:16,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
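The TableDescriptorChecker WARN above fires because the effective memstore flush size is 131072 bytes (128 KB), far below the 128 MB default. A hedged sketch of the two knobs the checker names; which one this test actually sets is not visible in the log.

    // Sketch only: the two places the WARN above looks at. Whether the test uses the
    // Configuration key or the table descriptor is an assumption; both are shown.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallFlushSizeSketch {
      // Cluster-wide route: regions flush once their memstores reach 128 KB.
      static Configuration smallFlushConf() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 131072L);
        return conf;
      }

      // Per-table route: the same limit carried on the table descriptor.
      static TableDescriptor smallFlushDescriptor() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
            .setMemStoreFlushSize(131072L)   // 131072 bytes = 128 KB
            .build();
      }
    }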
2024-12-08T11:22:16,437 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:22:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:16,439 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-08T11:22:16,439 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:16,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 159 2024-12-08T11:22:16,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-08T11:22:16,439 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-08T11:22:16,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742423_1599 (size=963) 2024-12-08T11:22:16,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-08T11:22:16,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-08T11:22:16,845 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c 2024-12-08T11:22:16,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742424_1600 (size=53) 2024-12-08T11:22:17,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-08T11:22:17,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:22:17,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing b64ae42323ee772e9f6012e0f6975e26, disabling compactions & flushes 2024-12-08T11:22:17,250 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:17,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:17,250 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. after waiting 0 ms 2024-12-08T11:22:17,251 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:17,251 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
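The create request above spells out the full schema: a table-level 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' attribute plus three single-version families A, B and C. A minimal sketch of building and submitting an equivalent descriptor with the HBase 2.x builder API; everything the request lists at its default value (BLOOMFILTER, BLOCKSIZE, and so on) is left at the default here, and the connection setup is illustrative.

    // Sketch only: an equivalent descriptor to the logged create request.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table attribute from the create request: adaptive in-memory compaction.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String cf : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
                  .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
                  .build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(builder.build());
        }
      }
    }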
2024-12-08T11:22:17,251 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:17,251 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-08T11:22:17,252 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1733656937252"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733656937252"}]},"ts":"1733656937252"} 2024-12-08T11:22:17,253 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-08T11:22:17,253 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-08T11:22:17,253 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656937253"}]},"ts":"1733656937253"} 2024-12-08T11:22:17,254 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-08T11:22:17,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, ASSIGN}] 2024-12-08T11:22:17,258 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, ASSIGN 2024-12-08T11:22:17,259 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, ASSIGN; state=OFFLINE, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=false 2024-12-08T11:22:17,409 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=b64ae42323ee772e9f6012e0f6975e26, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:22:17,410 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; OpenRegionProcedure b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:22:17,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-08T11:22:17,562 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:17,564 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:17,564 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7285): Opening region: {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:22:17,564 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:17,564 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:22:17,564 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7327): checking encryption for b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:17,564 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(7330): checking classloading for b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:17,565 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:17,566 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:22:17,567 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b64ae42323ee772e9f6012e0f6975e26 columnFamilyName A 2024-12-08T11:22:17,567 DEBUG [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:17,567 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(327): Store=b64ae42323ee772e9f6012e0f6975e26/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:22:17,567 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:17,568 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:22:17,568 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b64ae42323ee772e9f6012e0f6975e26 columnFamilyName B 2024-12-08T11:22:17,568 DEBUG [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:17,569 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(327): Store=b64ae42323ee772e9f6012e0f6975e26/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:22:17,569 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:17,569 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:22:17,570 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b64ae42323ee772e9f6012e0f6975e26 columnFamilyName C 2024-12-08T11:22:17,570 DEBUG [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:17,570 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(327): Store=b64ae42323ee772e9f6012e0f6975e26/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:22:17,570 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:17,571 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:17,571 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:17,572 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T11:22:17,573 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1085): writing seq id for b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:17,574 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-08T11:22:17,574 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1102): Opened b64ae42323ee772e9f6012e0f6975e26; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64404209, jitterRate=-0.04030250012874603}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:22:17,575 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegion(1001): Region open journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:17,576 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., pid=161, masterSystemTime=1733656937561 2024-12-08T11:22:17,577 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:17,577 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=161}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:17,577 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=b64ae42323ee772e9f6012e0f6975e26, regionState=OPEN, openSeqNum=2, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:22:17,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-12-08T11:22:17,579 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; OpenRegionProcedure b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 in 168 msec 2024-12-08T11:22:17,580 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-08T11:22:17,580 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, ASSIGN in 322 msec 2024-12-08T11:22:17,580 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-08T11:22:17,580 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656937580"}]},"ts":"1733656937580"} 2024-12-08T11:22:17,581 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-08T11:22:17,583 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=159, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-08T11:22:17,584 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1460 sec 2024-12-08T11:22:18,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-08T11:22:18,543 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-08T11:22:18,544 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2cbfd84f to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2209c520 2024-12-08T11:22:18,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5765d46a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:18,554 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:18,555 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:18,556 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-08T11:22:18,557 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43754, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-08T11:22:18,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-08T11:22:18,558 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-08T11:22:18,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:18,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742425_1601 (size=999) 2024-12-08T11:22:18,967 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-08T11:22:18,968 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-08T11:22:18,969 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T11:22:18,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, REOPEN/MOVE}] 2024-12-08T11:22:18,971 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, REOPEN/MOVE 2024-12-08T11:22:18,972 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b64ae42323ee772e9f6012e0f6975e26, regionState=CLOSING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:22:18,972 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T11:22:18,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=164, state=RUNNABLE; CloseRegionProcedure b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:22:19,124 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,124 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] handler.UnassignRegionHandler(124): Close b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,124 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T11:22:19,124 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1681): Closing b64ae42323ee772e9f6012e0f6975e26, disabling compactions & flushes 2024-12-08T11:22:19,124 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,124 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,124 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. after waiting 0 ms 2024-12-08T11:22:19,124 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
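The modify request above switches family A to IS_MOB => 'true' with MOB_THRESHOLD => '4', which is what drives the ModifyTableProcedure, the tableinfo rewrite, and the region reopen that follows. Whether the test submits a whole replacement table descriptor or a single-family change is not visible here; the sketch below uses the single-family route via Admin.modifyColumnFamily, with the connection setup again illustrative.

    // Sketch only: enable MOB on family A with a 4-byte threshold, as in the logged modify.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ColumnFamilyDescriptor current =
              admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
          ColumnFamilyDescriptor mobEnabled =
              ColumnFamilyDescriptorBuilder.newBuilder(current)
                  .setMobEnabled(true)   // IS_MOB => 'true'
                  .setMobThreshold(4L)   // MOB_THRESHOLD => '4' (bytes)
                  .build();
          // Triggers a ModifyTableProcedure and a region reopen, as in the entries above.
          admin.modifyColumnFamily(table, mobEnabled);
        }
      }
    }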
2024-12-08T11:22:19,127 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-08T11:22:19,128 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,128 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegion(1635): Region close journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:19,128 WARN [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] regionserver.HRegionServer(3786): Not adding moved region record: b64ae42323ee772e9f6012e0f6975e26 to self. 2024-12-08T11:22:19,129 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=165}] handler.UnassignRegionHandler(170): Closed b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,129 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b64ae42323ee772e9f6012e0f6975e26, regionState=CLOSED 2024-12-08T11:22:19,131 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=164 2024-12-08T11:22:19,131 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=164, state=SUCCESS; CloseRegionProcedure b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 in 157 msec 2024-12-08T11:22:19,131 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, REOPEN/MOVE; state=CLOSED, location=355ef6e50110,46083,1733656795491; forceNewPlan=false, retain=true 2024-12-08T11:22:19,282 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b64ae42323ee772e9f6012e0f6975e26, regionState=OPENING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE; OpenRegionProcedure b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:22:19,434 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,437 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:19,437 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} 2024-12-08T11:22:19,437 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,437 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-08T11:22:19,437 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,437 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,438 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,439 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:22:19,439 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b64ae42323ee772e9f6012e0f6975e26 columnFamilyName A 2024-12-08T11:22:19,440 DEBUG [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:19,440 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(327): Store=b64ae42323ee772e9f6012e0f6975e26/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:22:19,440 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,441 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:22:19,441 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b64ae42323ee772e9f6012e0f6975e26 columnFamilyName B 2024-12-08T11:22:19,441 DEBUG [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:19,441 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(327): Store=b64ae42323ee772e9f6012e0f6975e26/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:22:19,441 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,442 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-08T11:22:19,442 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b64ae42323ee772e9f6012e0f6975e26 columnFamilyName C 2024-12-08T11:22:19,442 DEBUG [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:19,442 INFO [StoreOpener-b64ae42323ee772e9f6012e0f6975e26-1 {}] regionserver.HStore(327): Store=b64ae42323ee772e9f6012e0f6975e26/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-08T11:22:19,442 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,443 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,443 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,445 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-08T11:22:19,445 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,446 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened b64ae42323ee772e9f6012e0f6975e26; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64802828, jitterRate=-0.034362614154815674}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-08T11:22:19,447 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:19,447 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., pid=166, masterSystemTime=1733656939434 2024-12-08T11:22:19,448 DEBUG [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,448 INFO [RS_OPEN_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
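With the region reopened under the modified descriptor, family A now treats any value longer than 4 bytes as a MOB cell. A small illustration of what that threshold means for writes; the row keys, qualifier, and values here are invented, and only the family name and threshold come from the logged descriptor.

    // Illustration only: with MOB_THRESHOLD => '4' on family A, the 3-byte value stays
    // inline while the 11-byte value is written as a MOB cell once flushed.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobThresholdSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] family = Bytes.toBytes("A");
          byte[] qualifier = Bytes.toBytes("q");   // hypothetical qualifier
          table.put(new Put(Bytes.toBytes("row1"))
              .addColumn(family, qualifier, Bytes.toBytes("abc")));          // 3 bytes: inline
          table.put(new Put(Bytes.toBytes("row2"))
              .addColumn(family, qualifier, Bytes.toBytes("hello world")));  // 11 bytes: MOB
        }
      }
    }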
2024-12-08T11:22:19,448 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=b64ae42323ee772e9f6012e0f6975e26, regionState=OPEN, openSeqNum=5, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,450 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=164 2024-12-08T11:22:19,450 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=164, state=SUCCESS; OpenRegionProcedure b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 in 166 msec 2024-12-08T11:22:19,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-08T11:22:19,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, REOPEN/MOVE in 479 msec 2024-12-08T11:22:19,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=163, resume processing ppid=162 2024-12-08T11:22:19,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 482 msec 2024-12-08T11:22:19,454 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 895 msec 2024-12-08T11:22:19,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-08T11:22:19,455 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fb684eb to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@537a66f8 2024-12-08T11:22:19,458 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ac53e79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,459 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0644b7e6 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6094c70 2024-12-08T11:22:19,461 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc9c3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,462 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9b5141 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@103dfc6e 2024-12-08T11:22:19,464 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181df3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,465 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11a52cdf 
to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e047c09 2024-12-08T11:22:19,470 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11030ef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,470 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d7fe431 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60d631a3 2024-12-08T11:22:19,474 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69abefea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,474 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58971172 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e757135 2024-12-08T11:22:19,481 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f6a59e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,481 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d7fe93b to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7846cb78 2024-12-08T11:22:19,484 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@150e08ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,485 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11c440f7 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f1754bc 2024-12-08T11:22:19,488 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a3b66d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,488 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58460ef3 to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d9113f3 2024-12-08T11:22:19,493 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfdf76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,494 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e8cd1ae to 127.0.0.1:63801 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bb75907 2024-12-08T11:22:19,501 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c2838a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-08T11:22:19,504 DEBUG [hconnection-0x68ac9e07-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,504 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:19,504 DEBUG [hconnection-0x69621603-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,504 DEBUG [hconnection-0x58a352df-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,505 DEBUG [hconnection-0x3ffeb4c5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,505 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:19,505 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:19,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-08T11:22:19,505 DEBUG [hconnection-0x62c389b2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,506 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46198, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:19,506 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46204, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:19,507 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46202, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:19,508 DEBUG [hconnection-0x3caad6bb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,508 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:19,508 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
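The 'flush TestAcidGuarantees' request above is stored as FlushTableProcedure pid=167, which fans out one FlushRegionProcedure per region. A minimal sketch of issuing the same flush from a client, assuming the HBase 2.x Admin API; connection setup is illustrative.

    // Sketch only: an explicit table flush request, as seen in the master log above.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush all memstores of the table; the master runs a
          // FlushTableProcedure with one FlushRegionProcedure per region.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }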
2024-12-08T11:22:19,509 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:19,509 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:19,509 DEBUG [hconnection-0x1e38354a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,510 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46226, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:19,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-08T11:22:19,510 DEBUG [hconnection-0x5c9562fc-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,511 DEBUG [hconnection-0x2c43ef83-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,511 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:19,511 DEBUG [hconnection-0x253603d0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-08T11:22:19,512 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46248, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:19,512 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-08T11:22:19,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:22:19,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:19,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:19,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:19,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:19,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:19,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:19,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733656999533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733656999534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733656999535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733656999536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,539 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733656999537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,553 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208ecf8a256501b4d61a86be9c2b0a14410_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656939513/Put/seqid=0 2024-12-08T11:22:19,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742426_1602 (size=12154) 2024-12-08T11:22:19,570 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:19,574 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208ecf8a256501b4d61a86be9c2b0a14410_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ecf8a256501b4d61a86be9c2b0a14410_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:19,575 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8b8014742d854d4399e5835bacaa6532, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:19,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8b8014742d854d4399e5835bacaa6532 is 175, key is test_row_0/A:col10/1733656939513/Put/seqid=0 2024-12-08T11:22:19,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742427_1603 (size=30955) 2024-12-08T11:22:19,586 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8b8014742d854d4399e5835bacaa6532 2024-12-08T11:22:19,610 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/9d2b4dac6de647beae89e2e3dad0bfa6 is 50, key is test_row_0/B:col10/1733656939513/Put/seqid=0 2024-12-08T11:22:19,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-08T11:22:19,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742428_1604 (size=12001) 2024-12-08T11:22:19,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733656999637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733656999638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733656999639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733656999639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733656999640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,661 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-08T11:22:19,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:19,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:19,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:19,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:19,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-08T11:22:19,813 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,814 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-08T11:22:19,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:19,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,814 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:19,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:19,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:19,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733656999839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733656999841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733656999841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733656999842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,844 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:19,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733656999843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,966 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:19,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-08T11:22:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:19,967 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:19,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:19,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:20,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/9d2b4dac6de647beae89e2e3dad0bfa6 2024-12-08T11:22:20,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/3c913a18d0b24232906b51a364b7baaf is 50, key is test_row_0/C:col10/1733656939513/Put/seqid=0 2024-12-08T11:22:20,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742429_1605 (size=12001) 2024-12-08T11:22:20,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-08T11:22:20,119 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,119 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-08T11:22:20,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:20,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:20,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:20,120 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:20,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:20,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:20,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657000141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657000144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657000145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657000145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657000147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,271 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,272 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-08T11:22:20,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:20,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:20,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:20,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
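The run of RegionTooBusyException warnings above shows client mutations being rejected while region b64ae42323ee772e9f6012e0f6975e26 sits over its 512.0 K blocking memstore limit. This exception is normally absorbed by the HBase client's own retry logic rather than handled in application code; the sketch below is illustrative only (not part of the captured log) and assumes a plain client writing to this TestAcidGuarantees table, showing the standard configuration knobs that govern how long such rejections are retried before a write fails.

```java
// Illustrative sketch: tuning the standard HBase client retry/backoff settings so that
// retriable server-side rejections such as RegionTooBusyException are retried internally
// instead of surfacing to the caller on the first attempt.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15);        // retry budget for retriable failures
    conf.setLong("hbase.client.pause", 100);                // base pause (ms) between retries
    conf.setLong("hbase.client.operation.timeout", 120_000); // overall per-operation budget (ms)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // While the region keeps reporting RegionTooBusyException, the client retries this put
      // with backoff until the configured retry/timeout budget is exhausted.
      table.put(put);
    }
  }
}
```

Raising hbase.client.retries.number or hbase.client.pause widens the window the client will wait for the region to drain its memstore before giving up.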
2024-12-08T11:22:20,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:20,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:20,424 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-08T11:22:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:20,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:20,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:20,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
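The pid=168 records above show the master repeatedly re-dispatching FlushRegionCallable while the region server answers "NOT flushing ... as already flushing"; once the memstore-flusher flush finishes at 11:22:20,480 (below), the dispatch at 11:22:20,578 is accepted and starts its own flush of all three column families. For reference, a flush of the same table can also be requested explicitly through the public Admin API; a minimal sketch, assuming a reachable cluster with default client configuration:

```java
// Illustrative sketch (not part of the captured log): requesting a flush of the
// TestAcidGuarantees table, covering all of its column families (A, B and C here),
// through the public Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequest {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flushes every region of the table; regions already flushing finish their
      // in-flight flush first, much like the pid=168 retries seen in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```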
2024-12-08T11:22:20,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/3c913a18d0b24232906b51a364b7baaf
2024-12-08T11:22:20,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8b8014742d854d4399e5835bacaa6532 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8b8014742d854d4399e5835bacaa6532
2024-12-08T11:22:20,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8b8014742d854d4399e5835bacaa6532, entries=150, sequenceid=16, filesize=30.2 K
2024-12-08T11:22:20,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/9d2b4dac6de647beae89e2e3dad0bfa6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9d2b4dac6de647beae89e2e3dad0bfa6
2024-12-08T11:22:20,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9d2b4dac6de647beae89e2e3dad0bfa6, entries=150, sequenceid=16, filesize=11.7 K
2024-12-08T11:22:20,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/3c913a18d0b24232906b51a364b7baaf as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c913a18d0b24232906b51a364b7baaf
2024-12-08T11:22:20,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c913a18d0b24232906b51a364b7baaf, entries=150, sequenceid=16, filesize=11.7 K
2024-12-08T11:22:20,480 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for b64ae42323ee772e9f6012e0f6975e26 in 967ms, sequenceid=16, compaction requested=false
2024-12-08T11:22:20,480 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees'
2024-12-08T11:22:20,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26:
2024-12-08T11:22:20,577 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491
2024-12-08T11:22:20,577 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168
2024-12-08T11:22:20,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.
2024-12-08T11:22:20,578 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-12-08T11:22:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A
2024-12-08T11:22:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:22:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B
2024-12-08T11:22:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:22:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C
2024-12-08T11:22:20,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:22:20,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208464b15db921548e6b2b7f1e1a01c8100_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656939535/Put/seqid=0
2024-12-08T11:22:20,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742430_1606 (size=12154)
2024-12-08T11:22:20,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167
2024-12-08T11:22:20,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26
2024-12-08T11:22:20,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing
2024-12-08T11:22:20,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657000653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657000654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657000655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,659 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657000656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657000656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657000757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657000757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657000758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657000760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657000760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657000960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657000960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657000961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657000962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:20,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657000963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:20,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:20,998 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208464b15db921548e6b2b7f1e1a01c8100_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208464b15db921548e6b2b7f1e1a01c8100_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:20,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/9338d68abf7c4a20b0f62e299968d756, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:20,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/9338d68abf7c4a20b0f62e299968d756 is 175, key is test_row_0/A:col10/1733656939535/Put/seqid=0 2024-12-08T11:22:21,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742431_1607 (size=30955) 2024-12-08T11:22:21,200 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-08T11:22:21,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657001263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657001265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657001266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657001266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,269 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657001267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,404 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/9338d68abf7c4a20b0f62e299968d756 2024-12-08T11:22:21,411 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b289dc20e63a470b90593c400d4450c6 is 50, key is test_row_0/B:col10/1733656939535/Put/seqid=0 2024-12-08T11:22:21,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742432_1608 (size=12001) 2024-12-08T11:22:21,415 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b289dc20e63a470b90593c400d4450c6 2024-12-08T11:22:21,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e87440b1ffd0456c80b900cf1f2badaa is 50, key is test_row_0/C:col10/1733656939535/Put/seqid=0 2024-12-08T11:22:21,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742433_1609 (size=12001) 2024-12-08T11:22:21,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-08T11:22:21,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657001766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657001769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657001770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657001770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:21,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657001773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:21,826 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e87440b1ffd0456c80b900cf1f2badaa 2024-12-08T11:22:21,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/9338d68abf7c4a20b0f62e299968d756 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/9338d68abf7c4a20b0f62e299968d756 2024-12-08T11:22:21,834 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/9338d68abf7c4a20b0f62e299968d756, entries=150, sequenceid=41, filesize=30.2 K 2024-12-08T11:22:21,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b289dc20e63a470b90593c400d4450c6 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b289dc20e63a470b90593c400d4450c6 2024-12-08T11:22:21,837 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b289dc20e63a470b90593c400d4450c6, entries=150, sequenceid=41, filesize=11.7 K 2024-12-08T11:22:21,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e87440b1ffd0456c80b900cf1f2badaa as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e87440b1ffd0456c80b900cf1f2badaa 2024-12-08T11:22:21,842 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e87440b1ffd0456c80b900cf1f2badaa, entries=150, sequenceid=41, filesize=11.7 K 2024-12-08T11:22:21,842 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for b64ae42323ee772e9f6012e0f6975e26 in 1264ms, sequenceid=41, compaction requested=false 2024-12-08T11:22:21,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:21,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:21,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-08T11:22:21,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-08T11:22:21,845 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-08T11:22:21,845 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3350 sec 2024-12-08T11:22:21,846 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.3410 sec 2024-12-08T11:22:22,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:22,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-08T11:22:22,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:22,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:22,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:22,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:22,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:22,774 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:22,780 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120846cc1cae251c46dfae6a0898017e5c3a_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656942773/Put/seqid=0 2024-12-08T11:22:22,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742434_1610 (size=12154) 2024-12-08T11:22:22,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657002793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:22,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657002794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:22,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657002795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:22,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657002796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:22,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657002797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:22,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657002898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:22,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657002898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:22,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657002899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:22,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657002899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:22,901 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:22,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657002900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657003101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657003101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657003102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657003102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,106 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657003103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,185 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:23,188 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120846cc1cae251c46dfae6a0898017e5c3a_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120846cc1cae251c46dfae6a0898017e5c3a_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:23,189 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/aec3765596ff4c67946aad28fb12cbed, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:23,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/aec3765596ff4c67946aad28fb12cbed is 175, key is test_row_0/A:col10/1733656942773/Put/seqid=0 2024-12-08T11:22:23,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742435_1611 (size=30955) 2024-12-08T11:22:23,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657003403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657003404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657003407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657003407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657003407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,594 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/aec3765596ff4c67946aad28fb12cbed 2024-12-08T11:22:23,602 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/c89299a030414b9ab7ae362b384d7f59 is 50, key is test_row_0/B:col10/1733656942773/Put/seqid=0 2024-12-08T11:22:23,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742436_1612 (size=12001) 2024-12-08T11:22:23,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-08T11:22:23,615 INFO [Thread-2647 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-08T11:22:23,616 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:23,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-12-08T11:22:23,617 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:23,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-08T11:22:23,618 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:23,618 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-08T11:22:23,769 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-08T11:22:23,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:23,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:23,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:23,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:23,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:23,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:23,829 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-08T11:22:23,910 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657003909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657003910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657003910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657003911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:23,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657003912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-08T11:22:23,922 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:23,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-08T11:22:23,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:23,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:23,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:23,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:23,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:23,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:24,010 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/c89299a030414b9ab7ae362b384d7f59 2024-12-08T11:22:24,017 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/31ff27fba10941fd8eca5866c84ce68f is 50, key is test_row_0/C:col10/1733656942773/Put/seqid=0 2024-12-08T11:22:24,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742437_1613 (size=12001) 2024-12-08T11:22:24,075 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:24,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-08T11:22:24,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:24,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:24,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:24,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:24,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:24,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:24,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-08T11:22:24,228 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:24,228 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-08T11:22:24,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:24,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:24,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:24,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:24,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:24,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:24,380 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:24,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-08T11:22:24,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:24,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:24,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:24,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] handler.RSProcedureHandler(58): pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:24,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=170 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:24,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=170 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:24,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/31ff27fba10941fd8eca5866c84ce68f 2024-12-08T11:22:24,427 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/aec3765596ff4c67946aad28fb12cbed as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/aec3765596ff4c67946aad28fb12cbed 2024-12-08T11:22:24,431 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/aec3765596ff4c67946aad28fb12cbed, entries=150, sequenceid=54, filesize=30.2 K 2024-12-08T11:22:24,432 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/c89299a030414b9ab7ae362b384d7f59 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c89299a030414b9ab7ae362b384d7f59 2024-12-08T11:22:24,435 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c89299a030414b9ab7ae362b384d7f59, entries=150, sequenceid=54, filesize=11.7 K 2024-12-08T11:22:24,435 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/31ff27fba10941fd8eca5866c84ce68f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/31ff27fba10941fd8eca5866c84ce68f 2024-12-08T11:22:24,439 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/31ff27fba10941fd8eca5866c84ce68f, entries=150, sequenceid=54, filesize=11.7 K 2024-12-08T11:22:24,440 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for b64ae42323ee772e9f6012e0f6975e26 in 1667ms, sequenceid=54, compaction requested=true 2024-12-08T11:22:24,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:24,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:24,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:24,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:24,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:24,440 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:24,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:24,440 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:24,440 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:24,441 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:24,441 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:24,441 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/B is initiating minor compaction (all files) 2024-12-08T11:22:24,441 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/A is initiating minor compaction (all files) 2024-12-08T11:22:24,441 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/B in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:24,441 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/A in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:24,441 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9d2b4dac6de647beae89e2e3dad0bfa6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b289dc20e63a470b90593c400d4450c6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c89299a030414b9ab7ae362b384d7f59] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=35.2 K 2024-12-08T11:22:24,441 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8b8014742d854d4399e5835bacaa6532, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/9338d68abf7c4a20b0f62e299968d756, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/aec3765596ff4c67946aad28fb12cbed] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=90.7 K 2024-12-08T11:22:24,441 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:24,441 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8b8014742d854d4399e5835bacaa6532, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/9338d68abf7c4a20b0f62e299968d756, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/aec3765596ff4c67946aad28fb12cbed] 2024-12-08T11:22:24,442 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d2b4dac6de647beae89e2e3dad0bfa6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733656939510 2024-12-08T11:22:24,442 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b8014742d854d4399e5835bacaa6532, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733656939510 2024-12-08T11:22:24,442 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b289dc20e63a470b90593c400d4450c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733656939531 2024-12-08T11:22:24,442 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9338d68abf7c4a20b0f62e299968d756, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733656939531 2024-12-08T11:22:24,442 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c89299a030414b9ab7ae362b384d7f59, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733656940650 2024-12-08T11:22:24,442 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting aec3765596ff4c67946aad28fb12cbed, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733656940650 2024-12-08T11:22:24,448 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:24,454 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#B#compaction#528 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:24,454 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/0007cfb45c624c4a8ea232f6ac5ac97e is 50, key is test_row_0/B:col10/1733656942773/Put/seqid=0 2024-12-08T11:22:24,464 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412081a20a423bdb4444d9d9ab87282707b70_b64ae42323ee772e9f6012e0f6975e26 store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:24,466 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412081a20a423bdb4444d9d9ab87282707b70_b64ae42323ee772e9f6012e0f6975e26, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:24,466 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412081a20a423bdb4444d9d9ab87282707b70_b64ae42323ee772e9f6012e0f6975e26 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:24,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742438_1614 (size=12104) 2024-12-08T11:22:24,493 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/0007cfb45c624c4a8ea232f6ac5ac97e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/0007cfb45c624c4a8ea232f6ac5ac97e 2024-12-08T11:22:24,499 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/B of b64ae42323ee772e9f6012e0f6975e26 into 0007cfb45c624c4a8ea232f6ac5ac97e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
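[Annotation] The PressureAwareThroughputController lines above ("average throughput is ... slept 0 time(s) ... total limit is 50.00 MB/second, N active operations remaining") describe a shared write-rate budget that is split across the compactions currently running; a compaction thread sleeps whenever it gets ahead of its share, and the slept count/time is what gets reported. The following sketch shows that idea only; names, the accounting window, and the control method are assumptions, not the actual HBase controller, which additionally adjusts the limit based on memstore/flush pressure.

```java
// Minimal rate-throttling sketch in the spirit of the throughput controller
// lines above: a global bytes-per-second budget divided by the number of
// active operations, with the writer sleeping when it runs ahead of it.
public class ThroughputThrottleSketch {
    private final double maxBytesPerSecond;      // e.g. 50 MB/s total limit
    private volatile int activeOperations = 1;   // compactions sharing the budget

    private final long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    public ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Called after writing `bytes`; returns milliseconds slept, i.e. the
    // quantity summarized as "slept N time(s) and total slept time is M ms".
    public synchronized long control(long bytes) throws InterruptedException {
        bytesInWindow += bytes;
        double elapsedSeconds = (System.nanoTime() - windowStartNanos) / 1e9;
        double allowedPerOperation = maxBytesPerSecond / Math.max(1, activeOperations);
        double expectedSeconds = bytesInWindow / allowedPerOperation;
        long sleepMillis = (long) ((expectedSeconds - elapsedSeconds) * 1000);
        if (sleepMillis > 0) {
            Thread.sleep(sleepMillis);
            return sleepMillis;
        }
        return 0;
    }
}
```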
2024-12-08T11:22:24,499 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:24,499 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/B, priority=13, startTime=1733656944440; duration=0sec 2024-12-08T11:22:24,499 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:24,499 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:B 2024-12-08T11:22:24,499 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:24,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742439_1615 (size=4469) 2024-12-08T11:22:24,501 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:24,502 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/C is initiating minor compaction (all files) 2024-12-08T11:22:24,502 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/C in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:24,502 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c913a18d0b24232906b51a364b7baaf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e87440b1ffd0456c80b900cf1f2badaa, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/31ff27fba10941fd8eca5866c84ce68f] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=35.2 K 2024-12-08T11:22:24,502 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#A#compaction#527 average throughput is 0.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:24,503 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8db62c4bf57e4d3e925a0946431b35f2 is 175, key is test_row_0/A:col10/1733656942773/Put/seqid=0 2024-12-08T11:22:24,504 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c913a18d0b24232906b51a364b7baaf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1733656939510 2024-12-08T11:22:24,505 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting e87440b1ffd0456c80b900cf1f2badaa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1733656939531 2024-12-08T11:22:24,505 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 31ff27fba10941fd8eca5866c84ce68f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733656940650 2024-12-08T11:22:24,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742440_1616 (size=31058) 2024-12-08T11:22:24,513 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#C#compaction#529 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:24,514 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/979bef07575b4a2091d2d6bd458e6cb7 is 50, key is test_row_0/C:col10/1733656942773/Put/seqid=0 2024-12-08T11:22:24,517 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8db62c4bf57e4d3e925a0946431b35f2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8db62c4bf57e4d3e925a0946431b35f2 2024-12-08T11:22:24,521 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/A of b64ae42323ee772e9f6012e0f6975e26 into 8db62c4bf57e4d3e925a0946431b35f2(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
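[Annotation] The "Committing .../.tmp/A/8db62c4bf57e4d3e925a0946431b35f2 as .../A/8db62c4bf57e4d3e925a0946431b35f2" lines above show the usual write-to-temp-then-rename commit: the compaction output is built in the region's .tmp directory and only becomes visible in the column-family directory through a single filesystem rename. A minimal sketch of that pattern with the Hadoop FileSystem API follows; the paths are placeholders, not the test's actual layout.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of committing a fully written file by renaming it out of a
// temporary directory, so readers never observe a partially written file.
public class CommitByRenameSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/default/ExampleTable/region/.tmp/A/newfile");
        Path finalFile = new Path("/data/default/ExampleTable/region/A/newfile");

        // rename() is atomic within a single HDFS namespace: the store either
        // sees the whole file or does not see it at all.
        if (!fs.rename(tmpFile, finalFile)) {
            throw new java.io.IOException("Failed to commit " + tmpFile + " as " + finalFile);
        }
    }
}
```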
2024-12-08T11:22:24,521 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:24,521 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/A, priority=13, startTime=1733656944440; duration=0sec 2024-12-08T11:22:24,521 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:24,521 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:A 2024-12-08T11:22:24,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742441_1617 (size=12104) 2024-12-08T11:22:24,532 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/979bef07575b4a2091d2d6bd458e6cb7 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/979bef07575b4a2091d2d6bd458e6cb7 2024-12-08T11:22:24,533 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:24,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-12-08T11:22:24,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:24,534 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-08T11:22:24,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:24,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:24,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:24,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:24,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:24,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:24,537 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/C of b64ae42323ee772e9f6012e0f6975e26 into 979bef07575b4a2091d2d6bd458e6cb7(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
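[Annotation] The FlushRegionCallable/"Flushing ... 3/3 column families" entries above are the region-server side of the flush procedures visible as pid=169 (FlushTableProcedure) and pid=170 (FlushRegionProcedure). On this code line (2.7.0-SNAPSHOT) a client-requested table flush is driven by the master as such a procedure; a minimal client-side sketch of requesting one is shown below, assuming connection settings come from the local hbase-site.xml.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: ask the cluster to flush all memstores of a table. The master
// runs a flush procedure that fans out to each region server, producing
// log entries like the FlushRegionCallable lines above.
public class RequestFlushSketch {
    public static void main(String[] args) throws Exception {
        try (Connection connection =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```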
2024-12-08T11:22:24,537 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:24,537 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/C, priority=13, startTime=1733656944440; duration=0sec 2024-12-08T11:22:24,537 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:24,537 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:C 2024-12-08T11:22:24,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085e1ad6eb663442b29b3e902b50e1905a_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656942795/Put/seqid=0 2024-12-08T11:22:24,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742442_1618 (size=12154) 2024-12-08T11:22:24,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-08T11:22:24,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:24,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:24,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:24,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:24,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657004922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:24,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657004922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:24,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:24,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657004923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:24,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:24,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657004925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:24,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:24,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657004925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:24,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:24,948 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085e1ad6eb663442b29b3e902b50e1905a_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085e1ad6eb663442b29b3e902b50e1905a_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:24,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/318b6e4d7bd34a91998d876b7065bf85, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:24,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/318b6e4d7bd34a91998d876b7065bf85 is 175, key is test_row_0/A:col10/1733656942795/Put/seqid=0 2024-12-08T11:22:24,952 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742443_1619 (size=30955) 2024-12-08T11:22:25,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657005026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,029 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657005027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,029 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657005028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657005032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657005228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657005230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657005231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657005234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,354 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/318b6e4d7bd34a91998d876b7065bf85 2024-12-08T11:22:25,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/eb2620b2a3034a3088b12424767295d5 is 50, key is test_row_0/B:col10/1733656942795/Put/seqid=0 2024-12-08T11:22:25,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742444_1620 (size=12001) 2024-12-08T11:22:25,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657005532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657005534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657005535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:25,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657005537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:25,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-08T11:22:25,765 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/eb2620b2a3034a3088b12424767295d5 2024-12-08T11:22:25,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/2bc4e85b2a344dfd9409ded50c25059a is 50, key is test_row_0/C:col10/1733656942795/Put/seqid=0 2024-12-08T11:22:25,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742445_1621 (size=12001) 2024-12-08T11:22:25,777 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/2bc4e85b2a344dfd9409ded50c25059a 2024-12-08T11:22:25,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/318b6e4d7bd34a91998d876b7065bf85 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/318b6e4d7bd34a91998d876b7065bf85 2024-12-08T11:22:25,785 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/318b6e4d7bd34a91998d876b7065bf85, entries=150, sequenceid=79, filesize=30.2 K 2024-12-08T11:22:25,786 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/eb2620b2a3034a3088b12424767295d5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/eb2620b2a3034a3088b12424767295d5 2024-12-08T11:22:25,790 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/eb2620b2a3034a3088b12424767295d5, entries=150, sequenceid=79, filesize=11.7 K 2024-12-08T11:22:25,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/2bc4e85b2a344dfd9409ded50c25059a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2bc4e85b2a344dfd9409ded50c25059a 2024-12-08T11:22:25,794 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2bc4e85b2a344dfd9409ded50c25059a, entries=150, sequenceid=79, filesize=11.7 K 2024-12-08T11:22:25,795 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for b64ae42323ee772e9f6012e0f6975e26 in 1261ms, sequenceid=79, compaction requested=false 2024-12-08T11:22:25,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:25,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
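[Annotation] The burst of RegionTooBusyException warnings above ("Over memstore limit=512.0 K") is the region rejecting writes while its memstore is above the blocking limit and the flush (pid=170) is still draining it; once the flush finishes, puts are accepted again. The stock HBase client normally absorbs these by retrying internally (governed by settings such as hbase.client.retries.number and the retry pause), so the explicit backoff loop below is only an illustrative sketch of how a writer could react if the exception surfaces; the row, family, and qualifier names are made up for the example.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative retry-with-backoff around a put that may hit a region whose
// memstore is over the blocking limit, as in the warnings above.
public class TooBusyRetrySketch {
    public static void main(String[] args) throws Exception {
        try (Connection connection =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMillis = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    return;                       // write accepted
                } catch (RegionTooBusyException busy) {
                    Thread.sleep(backoffMillis);  // give the in-flight flush time to finish
                    backoffMillis *= 2;
                }
            }
            throw new java.io.IOException("region stayed too busy after retries");
        }
    }
}
```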
2024-12-08T11:22:25,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-12-08T11:22:25,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-12-08T11:22:25,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-08T11:22:25,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1780 sec 2024-12-08T11:22:25,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 2.1830 sec 2024-12-08T11:22:26,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:26,038 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-08T11:22:26,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:26,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:26,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:26,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:26,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:26,038 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:26,045 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083ed52982dd3b45099af50af35061ce3b_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656946037/Put/seqid=0 2024-12-08T11:22:26,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657006059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657006060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657006061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657006061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742446_1622 (size=12154) 2024-12-08T11:22:26,166 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657006164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657006164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657006165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657006165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657006367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657006368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657006368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,370 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657006369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,466 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:26,470 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412083ed52982dd3b45099af50af35061ce3b_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083ed52982dd3b45099af50af35061ce3b_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:26,471 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/dcbdf5d3689b481abef0c4dab0cd25eb, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:26,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/dcbdf5d3689b481abef0c4dab0cd25eb is 175, key is test_row_0/A:col10/1733656946037/Put/seqid=0 2024-12-08T11:22:26,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742447_1623 (size=30955) 2024-12-08T11:22:26,475 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/dcbdf5d3689b481abef0c4dab0cd25eb 2024-12-08T11:22:26,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5a39e0c4d4354468aed5d6785122d510 is 50, key is test_row_0/B:col10/1733656946037/Put/seqid=0 2024-12-08T11:22:26,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742448_1624 
(size=12001) 2024-12-08T11:22:26,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657006670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657006671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657006672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657006673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5a39e0c4d4354468aed5d6785122d510 2024-12-08T11:22:26,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/144ea5a4a3db4e868b43b50f269e286c is 50, key is test_row_0/C:col10/1733656946037/Put/seqid=0 2024-12-08T11:22:26,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742449_1625 (size=12001) 2024-12-08T11:22:26,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:26,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657006939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:26,941 DEBUG [Thread-2637 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:22:27,175 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:27,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657007175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:27,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:27,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657007175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:27,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:27,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657007176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:27,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:27,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657007179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:27,302 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/144ea5a4a3db4e868b43b50f269e286c 2024-12-08T11:22:27,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/dcbdf5d3689b481abef0c4dab0cd25eb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/dcbdf5d3689b481abef0c4dab0cd25eb 2024-12-08T11:22:27,310 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/dcbdf5d3689b481abef0c4dab0cd25eb, entries=150, sequenceid=94, filesize=30.2 K 2024-12-08T11:22:27,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5a39e0c4d4354468aed5d6785122d510 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5a39e0c4d4354468aed5d6785122d510 2024-12-08T11:22:27,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5a39e0c4d4354468aed5d6785122d510, entries=150, sequenceid=94, filesize=11.7 K 2024-12-08T11:22:27,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/144ea5a4a3db4e868b43b50f269e286c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/144ea5a4a3db4e868b43b50f269e286c 2024-12-08T11:22:27,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/144ea5a4a3db4e868b43b50f269e286c, entries=150, sequenceid=94, filesize=11.7 K 2024-12-08T11:22:27,318 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for b64ae42323ee772e9f6012e0f6975e26 in 1280ms, sequenceid=94, compaction requested=true 2024-12-08T11:22:27,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:27,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:27,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:27,318 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:27,318 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:27,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:27,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:27,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:27,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:27,319 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:27,319 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/B is initiating minor compaction (all files) 2024-12-08T11:22:27,319 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/B in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:27,319 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/0007cfb45c624c4a8ea232f6ac5ac97e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/eb2620b2a3034a3088b12424767295d5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5a39e0c4d4354468aed5d6785122d510] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=35.3 K 2024-12-08T11:22:27,319 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92968 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:27,320 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/A is initiating minor compaction (all files) 2024-12-08T11:22:27,320 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/A in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:27,320 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 0007cfb45c624c4a8ea232f6ac5ac97e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733656940650 2024-12-08T11:22:27,320 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8db62c4bf57e4d3e925a0946431b35f2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/318b6e4d7bd34a91998d876b7065bf85, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/dcbdf5d3689b481abef0c4dab0cd25eb] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=90.8 K 2024-12-08T11:22:27,320 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:27,320 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8db62c4bf57e4d3e925a0946431b35f2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/318b6e4d7bd34a91998d876b7065bf85, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/dcbdf5d3689b481abef0c4dab0cd25eb] 2024-12-08T11:22:27,320 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting eb2620b2a3034a3088b12424767295d5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733656942792 2024-12-08T11:22:27,320 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8db62c4bf57e4d3e925a0946431b35f2, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733656940650 2024-12-08T11:22:27,320 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5a39e0c4d4354468aed5d6785122d510, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656944921 2024-12-08T11:22:27,321 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 318b6e4d7bd34a91998d876b7065bf85, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733656942792 2024-12-08T11:22:27,321 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcbdf5d3689b481abef0c4dab0cd25eb, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656944921 2024-12-08T11:22:27,327 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#B#compaction#536 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:27,327 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:27,327 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5ad3952e743c4b93aea2b38b19659d9a is 50, key is test_row_0/B:col10/1733656946037/Put/seqid=0 2024-12-08T11:22:27,329 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208639b6ce4cb3f4de886dd4b36e47342d8_b64ae42323ee772e9f6012e0f6975e26 store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:27,331 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208639b6ce4cb3f4de886dd4b36e47342d8_b64ae42323ee772e9f6012e0f6975e26, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:27,331 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208639b6ce4cb3f4de886dd4b36e47342d8_b64ae42323ee772e9f6012e0f6975e26 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:27,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742450_1626 (size=12207) 2024-12-08T11:22:27,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742451_1627 (size=4469) 2024-12-08T11:22:27,347 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#A#compaction#537 average throughput is 1.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:27,348 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/37c69920a93346aea0b2db51dfef48ad is 175, key is test_row_0/A:col10/1733656946037/Put/seqid=0 2024-12-08T11:22:27,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742452_1628 (size=31161) 2024-12-08T11:22:27,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-08T11:22:27,722 INFO [Thread-2647 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-08T11:22:27,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:27,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-12-08T11:22:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T11:22:27,725 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:27,725 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:27,725 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:27,741 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5ad3952e743c4b93aea2b38b19659d9a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5ad3952e743c4b93aea2b38b19659d9a 2024-12-08T11:22:27,745 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/B of b64ae42323ee772e9f6012e0f6975e26 into 5ad3952e743c4b93aea2b38b19659d9a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:27,745 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:27,745 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/B, priority=13, startTime=1733656947318; duration=0sec 2024-12-08T11:22:27,745 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:27,745 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:B 2024-12-08T11:22:27,745 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:27,746 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:27,746 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/C is initiating minor compaction (all files) 2024-12-08T11:22:27,746 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/C in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:27,746 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/979bef07575b4a2091d2d6bd458e6cb7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2bc4e85b2a344dfd9409ded50c25059a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/144ea5a4a3db4e868b43b50f269e286c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=35.3 K 2024-12-08T11:22:27,746 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 979bef07575b4a2091d2d6bd458e6cb7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1733656940650 2024-12-08T11:22:27,747 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bc4e85b2a344dfd9409ded50c25059a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1733656942792 2024-12-08T11:22:27,747 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 144ea5a4a3db4e868b43b50f269e286c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656944921 2024-12-08T11:22:27,762 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/37c69920a93346aea0b2db51dfef48ad as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/37c69920a93346aea0b2db51dfef48ad 2024-12-08T11:22:27,764 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#C#compaction#538 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:27,764 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/90894fee9197497ba6f6037f93eda8c8 is 50, key is test_row_0/C:col10/1733656946037/Put/seqid=0 2024-12-08T11:22:27,767 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/A of b64ae42323ee772e9f6012e0f6975e26 into 37c69920a93346aea0b2db51dfef48ad(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:27,767 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:27,767 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/A, priority=13, startTime=1733656947318; duration=0sec 2024-12-08T11:22:27,767 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:27,767 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:A 2024-12-08T11:22:27,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742453_1629 (size=12207) 2024-12-08T11:22:27,791 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/90894fee9197497ba6f6037f93eda8c8 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/90894fee9197497ba6f6037f93eda8c8 2024-12-08T11:22:27,795 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/C of b64ae42323ee772e9f6012e0f6975e26 into 90894fee9197497ba6f6037f93eda8c8(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:27,795 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:27,795 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/C, priority=13, startTime=1733656947318; duration=0sec 2024-12-08T11:22:27,795 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:27,795 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:C 2024-12-08T11:22:27,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T11:22:27,877 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:27,877 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-12-08T11:22:27,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:27,877 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-08T11:22:27,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:27,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:27,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:27,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:27,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:27,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:27,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208063d7727374b4d9e803ffe0b78d8a842_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656946057/Put/seqid=0 2024-12-08T11:22:27,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742454_1630 (size=12154) 2024-12-08T11:22:27,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:27,893 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208063d7727374b4d9e803ffe0b78d8a842_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208063d7727374b4d9e803ffe0b78d8a842_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:27,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/cf6ab20cbbaa4fd389ca33c9f8ad6158, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:27,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/cf6ab20cbbaa4fd389ca33c9f8ad6158 is 175, key is test_row_0/A:col10/1733656946057/Put/seqid=0 2024-12-08T11:22:27,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742455_1631 (size=30955) 2024-12-08T11:22:27,898 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/cf6ab20cbbaa4fd389ca33c9f8ad6158 2024-12-08T11:22:27,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/91c5158e08df4224a629750e5fe01deb is 50, key is test_row_0/B:col10/1733656946057/Put/seqid=0 2024-12-08T11:22:27,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742456_1632 (size=12001) 2024-12-08T11:22:28,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T11:22:28,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:28,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:28,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657008188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657008188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657008190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657008191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657008292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,294 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657008292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657008294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657008294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,309 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/91c5158e08df4224a629750e5fe01deb 2024-12-08T11:22:28,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/d4e5dfb1fa144d71bcdc712e3e5e7c47 is 50, key is test_row_0/C:col10/1733656946057/Put/seqid=0 2024-12-08T11:22:28,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742457_1633 (size=12001) 2024-12-08T11:22:28,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T11:22:28,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657008495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657008496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657008496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657008497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,720 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/d4e5dfb1fa144d71bcdc712e3e5e7c47 2024-12-08T11:22:28,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/cf6ab20cbbaa4fd389ca33c9f8ad6158 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/cf6ab20cbbaa4fd389ca33c9f8ad6158 2024-12-08T11:22:28,727 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/cf6ab20cbbaa4fd389ca33c9f8ad6158, entries=150, sequenceid=119, filesize=30.2 K 2024-12-08T11:22:28,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/91c5158e08df4224a629750e5fe01deb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/91c5158e08df4224a629750e5fe01deb 2024-12-08T11:22:28,730 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/91c5158e08df4224a629750e5fe01deb, entries=150, sequenceid=119, filesize=11.7 K 2024-12-08T11:22:28,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/d4e5dfb1fa144d71bcdc712e3e5e7c47 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/d4e5dfb1fa144d71bcdc712e3e5e7c47 2024-12-08T11:22:28,734 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/d4e5dfb1fa144d71bcdc712e3e5e7c47, entries=150, sequenceid=119, filesize=11.7 K 2024-12-08T11:22:28,735 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for b64ae42323ee772e9f6012e0f6975e26 in 858ms, sequenceid=119, compaction requested=false 2024-12-08T11:22:28,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:28,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:28,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-08T11:22:28,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-08T11:22:28,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-08T11:22:28,738 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0110 sec 2024-12-08T11:22:28,739 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.0150 sec 2024-12-08T11:22:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:28,801 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-08T11:22:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:28,804 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:28,811 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208888e37493ece40929253c4960ba915b0_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656948187/Put/seqid=0 2024-12-08T11:22:28,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742458_1634 (size=12304) 2024-12-08T11:22:28,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657008824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-12-08T11:22:28,828 INFO [Thread-2647 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-12-08T11:22:28,828 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657008825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657008826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,829 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:28,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657008826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-12-08T11:22:28,830 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:28,831 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:28,831 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:28,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T11:22:28,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657008928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657008929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657008930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:28,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657008930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T11:22:28,982 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:28,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-08T11:22:28,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:28,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:28,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:28,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:28,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:28,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T11:22:29,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657009131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657009131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,135 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-08T11:22:29,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:29,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:29,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657009134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:29,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657009134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,218 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:29,221 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208888e37493ece40929253c4960ba915b0_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208888e37493ece40929253c4960ba915b0_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:29,222 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/17c07391214a462b9e492851978b62cf, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:29,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/17c07391214a462b9e492851978b62cf is 175, key is test_row_0/A:col10/1733656948187/Put/seqid=0 2024-12-08T11:22:29,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is 
added to blk_1073742459_1635 (size=31105) 2024-12-08T11:22:29,287 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-08T11:22:29,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:29,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:29,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T11:22:29,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657009435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657009436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,440 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-08T11:22:29,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:29,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657009438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657009439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,592 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-08T11:22:29,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:29,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,593 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:29,626 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=136, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/17c07391214a462b9e492851978b62cf 2024-12-08T11:22:29,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b81f5a57d87a423e992577f4309db892 is 50, key is test_row_0/B:col10/1733656948187/Put/seqid=0 2024-12-08T11:22:29,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742460_1636 (size=12151) 2024-12-08T11:22:29,660 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=136 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b81f5a57d87a423e992577f4309db892 2024-12-08T11:22:29,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e4dd3a54c15b4025a8b2f12b6a9a8f79 is 50, key is test_row_0/C:col10/1733656948187/Put/seqid=0 2024-12-08T11:22:29,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742461_1637 (size=12151) 2024-12-08T11:22:29,682 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=136 (bloomFilter=true), 
to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e4dd3a54c15b4025a8b2f12b6a9a8f79 2024-12-08T11:22:29,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/17c07391214a462b9e492851978b62cf as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/17c07391214a462b9e492851978b62cf 2024-12-08T11:22:29,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/17c07391214a462b9e492851978b62cf, entries=150, sequenceid=136, filesize=30.4 K 2024-12-08T11:22:29,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b81f5a57d87a423e992577f4309db892 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b81f5a57d87a423e992577f4309db892 2024-12-08T11:22:29,695 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b81f5a57d87a423e992577f4309db892, entries=150, sequenceid=136, filesize=11.9 K 2024-12-08T11:22:29,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e4dd3a54c15b4025a8b2f12b6a9a8f79 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e4dd3a54c15b4025a8b2f12b6a9a8f79 2024-12-08T11:22:29,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e4dd3a54c15b4025a8b2f12b6a9a8f79, entries=150, sequenceid=136, filesize=11.9 K 2024-12-08T11:22:29,699 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for b64ae42323ee772e9f6012e0f6975e26 in 898ms, sequenceid=136, compaction requested=true 2024-12-08T11:22:29,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:29,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:29,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:29,699 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:29,699 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:29,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:29,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:29,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:29,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:29,700 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93221 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:29,700 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/A is initiating minor compaction (all files) 2024-12-08T11:22:29,700 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:29,700 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/A in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,700 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/B is initiating minor compaction (all files) 2024-12-08T11:22:29,700 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/37c69920a93346aea0b2db51dfef48ad, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/cf6ab20cbbaa4fd389ca33c9f8ad6158, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/17c07391214a462b9e492851978b62cf] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=91.0 K 2024-12-08T11:22:29,700 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/B in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:29,700 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,700 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5ad3952e743c4b93aea2b38b19659d9a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/91c5158e08df4224a629750e5fe01deb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b81f5a57d87a423e992577f4309db892] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=35.5 K 2024-12-08T11:22:29,700 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/37c69920a93346aea0b2db51dfef48ad, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/cf6ab20cbbaa4fd389ca33c9f8ad6158, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/17c07391214a462b9e492851978b62cf] 2024-12-08T11:22:29,701 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ad3952e743c4b93aea2b38b19659d9a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656944921 2024-12-08T11:22:29,701 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 37c69920a93346aea0b2db51dfef48ad, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656944921 2024-12-08T11:22:29,701 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf6ab20cbbaa4fd389ca33c9f8ad6158, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733656946057 2024-12-08T11:22:29,701 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 91c5158e08df4224a629750e5fe01deb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733656946057 2024-12-08T11:22:29,701 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b81f5a57d87a423e992577f4309db892, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656948187 2024-12-08T11:22:29,701 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17c07391214a462b9e492851978b62cf, keycount=150, bloomtype=ROW, size=30.4 K, 
encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656948187 2024-12-08T11:22:29,708 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:29,710 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#B#compaction#546 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:29,710 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/c6aca23b70af4115ac014e457287acd3 is 50, key is test_row_0/B:col10/1733656948187/Put/seqid=0 2024-12-08T11:22:29,710 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208e47b62cc347040a8baad22fcbaa29f40_b64ae42323ee772e9f6012e0f6975e26 store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:29,713 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208e47b62cc347040a8baad22fcbaa29f40_b64ae42323ee772e9f6012e0f6975e26, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:29,713 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208e47b62cc347040a8baad22fcbaa29f40_b64ae42323ee772e9f6012e0f6975e26 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:29,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742462_1638 (size=12459) 2024-12-08T11:22:29,722 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/c6aca23b70af4115ac014e457287acd3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c6aca23b70af4115ac014e457287acd3 2024-12-08T11:22:29,725 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/B of b64ae42323ee772e9f6012e0f6975e26 into c6aca23b70af4115ac014e457287acd3(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:29,725 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:29,725 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/B, priority=13, startTime=1733656949699; duration=0sec 2024-12-08T11:22:29,725 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:29,726 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:B 2024-12-08T11:22:29,726 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:29,726 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:29,726 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/C is initiating minor compaction (all files) 2024-12-08T11:22:29,727 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/C in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:29,727 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/90894fee9197497ba6f6037f93eda8c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/d4e5dfb1fa144d71bcdc712e3e5e7c47, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e4dd3a54c15b4025a8b2f12b6a9a8f79] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=35.5 K 2024-12-08T11:22:29,727 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 90894fee9197497ba6f6037f93eda8c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1733656944921 2024-12-08T11:22:29,727 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting d4e5dfb1fa144d71bcdc712e3e5e7c47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1733656946057 2024-12-08T11:22:29,727 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting e4dd3a54c15b4025a8b2f12b6a9a8f79, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656948187 2024-12-08T11:22:29,734 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b64ae42323ee772e9f6012e0f6975e26#C#compaction#547 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:29,734 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/a4f16b815e9041368be72b26e233b86c is 50, key is test_row_0/C:col10/1733656948187/Put/seqid=0 2024-12-08T11:22:29,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742463_1639 (size=4469) 2024-12-08T11:22:29,738 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#A#compaction#545 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:29,738 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/df049fa995c14f78a87e54973b2f069e is 175, key is test_row_0/A:col10/1733656948187/Put/seqid=0 2024-12-08T11:22:29,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742464_1640 (size=12459) 2024-12-08T11:22:29,744 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,747 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-12-08T11:22:29,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:29,747 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-08T11:22:29,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:29,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:29,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:29,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:29,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:29,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:29,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742465_1641 (size=31413) 2024-12-08T11:22:29,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120840ed08ef1081497c970d56a2c240ac20_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656948824/Put/seqid=0 2024-12-08T11:22:29,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742466_1642 (size=12304) 2024-12-08T11:22:29,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:29,770 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120840ed08ef1081497c970d56a2c240ac20_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120840ed08ef1081497c970d56a2c240ac20_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:29,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/0b07ec89594f47a0a272aedc60faf67d, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:29,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/0b07ec89594f47a0a272aedc60faf67d is 175, key is test_row_0/A:col10/1733656948824/Put/seqid=0 2024-12-08T11:22:29,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742467_1643 (size=31105) 2024-12-08T11:22:29,775 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=157, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/0b07ec89594f47a0a272aedc60faf67d 2024-12-08T11:22:29,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/14848a7cf4df43479b90e096bf27a11a is 50, key is test_row_0/B:col10/1733656948824/Put/seqid=0 2024-12-08T11:22:29,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742468_1644 (size=12151) 2024-12-08T11:22:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T11:22:29,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:29,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:29,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657009954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657009955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,957 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657009955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:29,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:29,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657009956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657010058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657010058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657010058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657010059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,147 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/a4f16b815e9041368be72b26e233b86c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/a4f16b815e9041368be72b26e233b86c 2024-12-08T11:22:30,151 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/C of b64ae42323ee772e9f6012e0f6975e26 into a4f16b815e9041368be72b26e233b86c(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:30,151 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:30,151 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/C, priority=13, startTime=1733656949699; duration=0sec 2024-12-08T11:22:30,151 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:30,151 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:C 2024-12-08T11:22:30,159 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/df049fa995c14f78a87e54973b2f069e as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/df049fa995c14f78a87e54973b2f069e 2024-12-08T11:22:30,163 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/A of b64ae42323ee772e9f6012e0f6975e26 into df049fa995c14f78a87e54973b2f069e(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:30,163 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:30,163 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/A, priority=13, startTime=1733656949699; duration=0sec 2024-12-08T11:22:30,163 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:30,163 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:A 2024-12-08T11:22:30,184 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/14848a7cf4df43479b90e096bf27a11a 2024-12-08T11:22:30,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/2cacfe3828e44d5da87c912ad4f8cf17 is 50, key is test_row_0/C:col10/1733656948824/Put/seqid=0 2024-12-08T11:22:30,194 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742469_1645 (size=12151) 2024-12-08T11:22:30,262 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657010262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657010262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657010262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,264 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657010262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,567 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657010565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657010565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657010566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:30,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657010567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:30,594 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/2cacfe3828e44d5da87c912ad4f8cf17 2024-12-08T11:22:30,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/0b07ec89594f47a0a272aedc60faf67d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0b07ec89594f47a0a272aedc60faf67d 2024-12-08T11:22:30,602 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0b07ec89594f47a0a272aedc60faf67d, entries=150, sequenceid=157, filesize=30.4 K 2024-12-08T11:22:30,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/14848a7cf4df43479b90e096bf27a11a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/14848a7cf4df43479b90e096bf27a11a 2024-12-08T11:22:30,606 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/14848a7cf4df43479b90e096bf27a11a, entries=150, sequenceid=157, filesize=11.9 K 2024-12-08T11:22:30,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/2cacfe3828e44d5da87c912ad4f8cf17 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2cacfe3828e44d5da87c912ad4f8cf17 2024-12-08T11:22:30,610 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2cacfe3828e44d5da87c912ad4f8cf17, entries=150, sequenceid=157, filesize=11.9 K 2024-12-08T11:22:30,611 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for b64ae42323ee772e9f6012e0f6975e26 in 864ms, sequenceid=157, compaction requested=false 2024-12-08T11:22:30,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:30,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:30,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-12-08T11:22:30,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-12-08T11:22:30,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-08T11:22:30,613 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7810 sec 2024-12-08T11:22:30,614 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.7840 sec 2024-12-08T11:22:30,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-08T11:22:30,935 INFO [Thread-2647 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-08T11:22:30,936 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:30,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-12-08T11:22:30,938 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 
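The entries above show a client-requested flush of TestAcidGuarantees finishing as procedure pid=173 (with subprocedure pid=174) and a new FlushTableProcedure pid=175 being queued right away; that flow is what the HBase Admin flush API drives, and Thread-2647 only reports the FLUSH operation complete once pid=173 reaches SUCCESS. A minimal sketch of issuing such a flush, assuming a standard client configuration on the classpath (none of the connection details below are taken from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the master to flush every region of the table; the master runs a
                // FlushTableProcedure with one FlushRegionProcedure per region, matching
                // the pid=173/174 (and later pid=175/176) entries in this log.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
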
2024-12-08T11:22:30,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T11:22:30,938 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:30,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:30,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:30,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T11:22:30,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:30,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:30,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:30,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:30,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:30,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:30,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120821039d38be1247e1b45aae4a129bafe1_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656949954/Put/seqid=0 2024-12-08T11:22:30,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742470_1646 (size=14794) 2024-12-08T11:22:31,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:31,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657011033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T11:22:31,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:31,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657011069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:31,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657011071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657011073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,074 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657011073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,090 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-08T11:22:31,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:31,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:31,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657011136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T11:22:31,243 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-08T11:22:31,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:31,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
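The RegionTooBusyException entries in this stretch all come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking threshold of 512.0 K. In HBase that threshold is the memstore flush size multiplied by the block multiplier (hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier), so the small 512 K figure suggests this test runs with a deliberately tiny flush size. A sketch of the relationship under assumed values (the 128 KB and 4x figures below are illustrative, not read from this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitExample {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Blocking threshold per region = flush.size * block.multiplier.
            // With the illustrative values below, writes are rejected with
            // RegionTooBusyException ("Over memstore limit=512.0 K") once a region
            // holds more than 128 KB * 4 = 512 KB of un-flushed memstore data.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("Blocking memstore limit per region: " + blockingLimit + " bytes");
        }
    }
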
2024-12-08T11:22:31,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:31,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657011340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,391 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:31,395 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120821039d38be1247e1b45aae4a129bafe1_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120821039d38be1247e1b45aae4a129bafe1_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:31,395 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/ca56cffd71d340d69ab942ba44537aae, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:31,396 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/ca56cffd71d340d69ab942ba44537aae is 175, key is test_row_0/A:col10/1733656949954/Put/seqid=0 2024-12-08T11:22:31,396 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-08T11:22:31,396 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:31,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
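Each rejected Mutate above is reported back to the caller as a RegionTooBusyException (visible in the ipc.CallRunner lines). The stock HBase client already retries such failures with backoff, so applications usually only see the exception once those retries are exhausted; the following is a sketch of an explicit, bounded retry around a single Put for code that wants its own backoff policy (the table, row, and column names are illustrative, and the exception may surface directly or wrapped, so the cause chain is inspected):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {

        // Returns true if any exception in the cause chain is a RegionTooBusyException.
        static boolean causedByBusyRegion(Throwable t) {
            for (Throwable cur = t; cur != null; cur = cur.getCause()) {
                if (cur instanceof RegionTooBusyException) {
                    return true;
                }
            }
            return false;
        }

        // Retries a single Put a few times when the region reports it is over its
        // memstore blocking limit, sleeping between attempts so in-flight flushes
        // (like the ones in this log) have time to drain the memstore.
        static void putWithRetry(Table table, Put put) throws IOException, InterruptedException {
            int attempts = 0;
            while (true) {
                try {
                    table.put(put);
                    return;
                } catch (IOException e) {
                    if (!causedByBusyRegion(e) || ++attempts >= 5) {
                        throw e;                    // not a busy-region error, or out of attempts
                    }
                    Thread.sleep(200L * attempts);  // simple linear backoff
                }
            }
        }

        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                putWithRetry(table, put);
            }
        }
    }
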
2024-12-08T11:22:31,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742471_1647 (size=39749) 2024-12-08T11:22:31,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T11:22:31,549 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-08T11:22:31,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:31,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:31,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657011643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,702 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-08T11:22:31,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:31,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,802 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/ca56cffd71d340d69ab942ba44537aae 2024-12-08T11:22:31,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/9019f446fbcb45dd8956fe026e2dee82 is 50, key is test_row_0/B:col10/1733656949954/Put/seqid=0 2024-12-08T11:22:31,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742472_1648 (size=12151) 2024-12-08T11:22:31,814 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/9019f446fbcb45dd8956fe026e2dee82 2024-12-08T11:22:31,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/302a3e2e2c044a7b89be8724fd036c52 is 50, key is test_row_0/C:col10/1733656949954/Put/seqid=0 2024-12-08T11:22:31,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742473_1649 (size=12151) 2024-12-08T11:22:31,856 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:31,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-08T11:22:31,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
as already flushing 2024-12-08T11:22:31,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:31,857 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:31,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:32,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:32,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-08T11:22:32,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:32,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:32,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:32,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:32,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:32,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:32,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T11:22:32,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:32,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657012070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:32,077 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:32,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657012075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:32,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:32,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657012078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:32,079 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:32,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657012078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:32,147 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:32,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657012145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:32,161 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:32,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-08T11:22:32,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:32,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:32,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:32,162 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:32,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:32,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:32,223 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/302a3e2e2c044a7b89be8724fd036c52 2024-12-08T11:22:32,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/ca56cffd71d340d69ab942ba44537aae as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/ca56cffd71d340d69ab942ba44537aae 2024-12-08T11:22:32,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/ca56cffd71d340d69ab942ba44537aae, entries=200, sequenceid=176, filesize=38.8 K 2024-12-08T11:22:32,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/9019f446fbcb45dd8956fe026e2dee82 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9019f446fbcb45dd8956fe026e2dee82 2024-12-08T11:22:32,233 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9019f446fbcb45dd8956fe026e2dee82, entries=150, 
sequenceid=176, filesize=11.9 K 2024-12-08T11:22:32,234 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/302a3e2e2c044a7b89be8724fd036c52 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/302a3e2e2c044a7b89be8724fd036c52 2024-12-08T11:22:32,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/302a3e2e2c044a7b89be8724fd036c52, entries=150, sequenceid=176, filesize=11.9 K 2024-12-08T11:22:32,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for b64ae42323ee772e9f6012e0f6975e26 in 1257ms, sequenceid=176, compaction requested=true 2024-12-08T11:22:32,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:32,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:32,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:32,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:32,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:32,237 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:32,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:32,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:32,237 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:32,238 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:32,238 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/A is initiating minor compaction (all files) 2024-12-08T11:22:32,238 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/A in 
TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:32,238 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/df049fa995c14f78a87e54973b2f069e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0b07ec89594f47a0a272aedc60faf67d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/ca56cffd71d340d69ab942ba44537aae] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=99.9 K 2024-12-08T11:22:32,238 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:32,239 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/df049fa995c14f78a87e54973b2f069e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0b07ec89594f47a0a272aedc60faf67d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/ca56cffd71d340d69ab942ba44537aae] 2024-12-08T11:22:32,239 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:32,239 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting df049fa995c14f78a87e54973b2f069e, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656948187 2024-12-08T11:22:32,239 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/B is initiating minor compaction (all files) 2024-12-08T11:22:32,239 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/B in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:32,239 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c6aca23b70af4115ac014e457287acd3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/14848a7cf4df43479b90e096bf27a11a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9019f446fbcb45dd8956fe026e2dee82] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=35.9 K 2024-12-08T11:22:32,239 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b07ec89594f47a0a272aedc60faf67d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733656948822 2024-12-08T11:22:32,239 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca56cffd71d340d69ab942ba44537aae, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733656949954 2024-12-08T11:22:32,240 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c6aca23b70af4115ac014e457287acd3, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656948187 2024-12-08T11:22:32,240 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 14848a7cf4df43479b90e096bf27a11a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733656948822 2024-12-08T11:22:32,240 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 9019f446fbcb45dd8956fe026e2dee82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733656949954 2024-12-08T11:22:32,244 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:32,246 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120810d9d7dcd3f1450f935bb69ccaf3bbb3_b64ae42323ee772e9f6012e0f6975e26 store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:32,246 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#B#compaction#555 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:32,247 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b9f43ec59b424acf9dec692dccb3f58a is 50, key is test_row_0/B:col10/1733656949954/Put/seqid=0 2024-12-08T11:22:32,248 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120810d9d7dcd3f1450f935bb69ccaf3bbb3_b64ae42323ee772e9f6012e0f6975e26, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:32,248 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120810d9d7dcd3f1450f935bb69ccaf3bbb3_b64ae42323ee772e9f6012e0f6975e26 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:32,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742474_1650 (size=12561) 2024-12-08T11:22:32,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742475_1651 (size=4469) 2024-12-08T11:22:32,254 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#A#compaction#554 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:32,254 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/0d2842a630344f959aee8d2bb4632474 is 175, key is test_row_0/A:col10/1733656949954/Put/seqid=0 2024-12-08T11:22:32,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742476_1652 (size=31515) 2024-12-08T11:22:32,264 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/0d2842a630344f959aee8d2bb4632474 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0d2842a630344f959aee8d2bb4632474 2024-12-08T11:22:32,268 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/A of b64ae42323ee772e9f6012e0f6975e26 into 0d2842a630344f959aee8d2bb4632474(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:32,268 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:32,268 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/A, priority=13, startTime=1733656952237; duration=0sec 2024-12-08T11:22:32,268 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:32,268 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:A 2024-12-08T11:22:32,268 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:32,269 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:32,269 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/C is initiating minor compaction (all files) 2024-12-08T11:22:32,269 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/C in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:32,269 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/a4f16b815e9041368be72b26e233b86c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2cacfe3828e44d5da87c912ad4f8cf17, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/302a3e2e2c044a7b89be8724fd036c52] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=35.9 K 2024-12-08T11:22:32,270 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4f16b815e9041368be72b26e233b86c, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=136, earliestPutTs=1733656948187 2024-12-08T11:22:32,270 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cacfe3828e44d5da87c912ad4f8cf17, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1733656948822 2024-12-08T11:22:32,270 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 302a3e2e2c044a7b89be8724fd036c52, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733656949954 2024-12-08T11:22:32,276 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#C#compaction#556 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:32,277 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/eaa5e66180ad4127adc1c13569549c51 is 50, key is test_row_0/C:col10/1733656949954/Put/seqid=0 2024-12-08T11:22:32,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742477_1653 (size=12561) 2024-12-08T11:22:32,284 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/eaa5e66180ad4127adc1c13569549c51 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/eaa5e66180ad4127adc1c13569549c51 2024-12-08T11:22:32,288 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/C of b64ae42323ee772e9f6012e0f6975e26 into eaa5e66180ad4127adc1c13569549c51(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:32,288 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:32,288 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/C, priority=13, startTime=1733656952237; duration=0sec 2024-12-08T11:22:32,288 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:32,288 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:C 2024-12-08T11:22:32,313 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:32,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-12-08T11:22:32,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
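The "Exploring compaction algorithm has selected 3 files of size 36761 ... with 1 in ratio" message above reflects a ratio-based file selection: a window of store files qualifies when no single file is larger than the configured ratio times the combined size of the other files in the window. A much-simplified sketch of that check (illustrative only, not the real ExploringCompactionPolicy; the file sizes are approximations of the three C-family files listed in the log):

import java.util.List;

// Simplified illustration of the "in ratio" test used when picking files to
// compact: a window of files qualifies if every file is no bigger than
// ratio * (sum of the other files in the window). Not the real HBase policy.
public final class RatioSelectionSketch {

    static boolean inRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;  // one file dominates the window; skip this window
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes roughly like the three C-family files in the log (12.2 K, 11.9 K, 11.9 K).
        List<Long> window = List.of(12_493L, 12_186L, 12_186L);
        System.out.println("window in ratio (1.2)? " + inRatio(window, 1.2));
    }
}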
2024-12-08T11:22:32,314 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T11:22:32,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:32,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:32,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:32,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:32,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:32,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:32,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412089af8cc10bc02492499997b5a839925a2_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656951024/Put/seqid=0 2024-12-08T11:22:32,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742478_1654 (size=12304) 2024-12-08T11:22:32,656 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b9f43ec59b424acf9dec692dccb3f58a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b9f43ec59b424acf9dec692dccb3f58a 2024-12-08T11:22:32,660 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/B of b64ae42323ee772e9f6012e0f6975e26 into b9f43ec59b424acf9dec692dccb3f58a(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
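The "Committing ... /.tmp/B/b9f43ec59b424acf9dec692dccb3f58a as .../B/b9f43ec59b424acf9dec692dccb3f58a" lines above show the write-to-temp-then-rename pattern: the new HFile is built under the region's .tmp directory and only made visible in the store directory by a rename once it is complete. A minimal sketch of that pattern against the Hadoop FileSystem API follows (placeholder paths; this is not the HRegionFileSystem code itself):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the write-to-.tmp-then-rename commit pattern visible in the log.
// Paths are placeholders; real HBase goes through HRegionFileSystem.
public final class CommitByRenameSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // fs.defaultFS would point at the cluster, e.g. hdfs://localhost:36759 in this test run.
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/default/SomeTable/region/.tmp/B/newfile");
        Path storeFile = new Path("/data/default/SomeTable/region/B/newfile");

        // Write the file fully under .tmp first (omitted), then make it visible
        // in the store directory with a single rename.
        if (!fs.rename(tmpFile, storeFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
    }
}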
2024-12-08T11:22:32,660 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:32,660 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/B, priority=13, startTime=1733656952237; duration=0sec 2024-12-08T11:22:32,660 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:32,660 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:B 2024-12-08T11:22:32,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:32,729 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412089af8cc10bc02492499997b5a839925a2_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089af8cc10bc02492499997b5a839925a2_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:32,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/af966c44d20c45539d51d1c2d65f9c4b, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:32,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/af966c44d20c45539d51d1c2d65f9c4b is 175, key is test_row_0/A:col10/1733656951024/Put/seqid=0 2024-12-08T11:22:32,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742479_1655 (size=31105) 2024-12-08T11:22:33,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T11:22:33,134 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/af966c44d20c45539d51d1c2d65f9c4b 2024-12-08T11:22:33,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 
{event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5c8438cd11ef4eca9921138b68435c18 is 50, key is test_row_0/B:col10/1733656951024/Put/seqid=0 2024-12-08T11:22:33,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742480_1656 (size=12151) 2024-12-08T11:22:33,145 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5c8438cd11ef4eca9921138b68435c18 2024-12-08T11:22:33,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/940425bff65847d3ba48c1ab353622eb is 50, key is test_row_0/C:col10/1733656951024/Put/seqid=0 2024-12-08T11:22:33,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:33,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:33,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742481_1657 (size=12151) 2024-12-08T11:22:33,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:33,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657013181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:33,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:33,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657013284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:33,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:33,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657013486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:33,555 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/940425bff65847d3ba48c1ab353622eb 2024-12-08T11:22:33,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/af966c44d20c45539d51d1c2d65f9c4b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/af966c44d20c45539d51d1c2d65f9c4b 2024-12-08T11:22:33,562 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/af966c44d20c45539d51d1c2d65f9c4b, entries=150, sequenceid=197, filesize=30.4 K 2024-12-08T11:22:33,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5c8438cd11ef4eca9921138b68435c18 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5c8438cd11ef4eca9921138b68435c18 2024-12-08T11:22:33,566 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5c8438cd11ef4eca9921138b68435c18, entries=150, sequenceid=197, filesize=11.9 K 2024-12-08T11:22:33,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/940425bff65847d3ba48c1ab353622eb as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/940425bff65847d3ba48c1ab353622eb 2024-12-08T11:22:33,570 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/940425bff65847d3ba48c1ab353622eb, entries=150, sequenceid=197, filesize=11.9 K 2024-12-08T11:22:33,570 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for b64ae42323ee772e9f6012e0f6975e26 in 1256ms, sequenceid=197, compaction requested=false 2024-12-08T11:22:33,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:33,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
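The RegionTooBusyException warnings in this stretch ("Over memstore limit=512.0 K") are the region server pushing back on writers while flushes drain the memstore, and the "Call exception, tries=6, retries=16" entries show the client's RpcRetryingCallerImpl retrying the put on its own. If a caller wanted an explicit backoff loop instead, it could look roughly like the sketch below; the table name, row, and backoff values are placeholders, and the standard client already retries internally:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustration of backing off when the server reports RegionTooBusyException,
// mirroring what the built-in retrying caller does in the log above.
// Table name, row and backoff values are placeholders.
public final class RetryOnBusyRegion {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_1"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;                       // write accepted
                } catch (RegionTooBusyException busy) {
                    if (attempt >= 10) {
                        throw busy;              // give up after a bounded number of tries
                    }
                    Thread.sleep(backoffMs);     // wait for flushes to drain the memstore
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}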
2024-12-08T11:22:33,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-08T11:22:33,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-12-08T11:22:33,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-12-08T11:22:33,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6330 sec 2024-12-08T11:22:33,574 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 2.6360 sec 2024-12-08T11:22:33,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:33,791 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-08T11:22:33,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:33,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:33,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:33,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:33,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:33,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:33,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085bd5d62bab6c469cb9614f890a1d44ca_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656953168/Put/seqid=0 2024-12-08T11:22:33,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742482_1658 (size=14794) 2024-12-08T11:22:33,801 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:33,805 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412085bd5d62bab6c469cb9614f890a1d44ca_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085bd5d62bab6c469cb9614f890a1d44ca_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:33,806 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/356436eaa2fd4fb0932b4a5db77f5290, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:33,806 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/356436eaa2fd4fb0932b4a5db77f5290 is 175, key is test_row_0/A:col10/1733656953168/Put/seqid=0 2024-12-08T11:22:33,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742483_1659 (size=39749) 2024-12-08T11:22:33,810 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=216, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/356436eaa2fd4fb0932b4a5db77f5290 2024-12-08T11:22:33,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/e42764116141487fb11c6b3cb0f17285 is 50, key is test_row_0/B:col10/1733656953168/Put/seqid=0 2024-12-08T11:22:33,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742484_1660 (size=12151) 2024-12-08T11:22:33,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:33,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657013830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:33,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:33,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657013933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:34,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:34,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657014077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:34,079 DEBUG [Thread-2639 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4123 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:22:34,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:34,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657014080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:34,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:34,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657014080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:34,082 DEBUG [Thread-2645 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4127 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:22:34,083 DEBUG [Thread-2641 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:22:34,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:34,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657014083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:34,086 DEBUG [Thread-2643 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4131 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:22:34,136 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:34,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657014134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:34,220 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/e42764116141487fb11c6b3cb0f17285 2024-12-08T11:22:34,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/7c38130299f648f7bf3b29ebbade1ba3 is 50, key is test_row_0/C:col10/1733656953168/Put/seqid=0 2024-12-08T11:22:34,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742485_1661 (size=12151) 2024-12-08T11:22:34,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:34,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657014438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:34,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/7c38130299f648f7bf3b29ebbade1ba3 2024-12-08T11:22:34,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/356436eaa2fd4fb0932b4a5db77f5290 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/356436eaa2fd4fb0932b4a5db77f5290 2024-12-08T11:22:34,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/356436eaa2fd4fb0932b4a5db77f5290, entries=200, sequenceid=216, filesize=38.8 K 2024-12-08T11:22:34,638 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/e42764116141487fb11c6b3cb0f17285 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/e42764116141487fb11c6b3cb0f17285 2024-12-08T11:22:34,641 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/e42764116141487fb11c6b3cb0f17285, entries=150, sequenceid=216, filesize=11.9 K 2024-12-08T11:22:34,642 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/7c38130299f648f7bf3b29ebbade1ba3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/7c38130299f648f7bf3b29ebbade1ba3 2024-12-08T11:22:34,645 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/7c38130299f648f7bf3b29ebbade1ba3, entries=150, sequenceid=216, filesize=11.9 K 2024-12-08T11:22:34,645 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for b64ae42323ee772e9f6012e0f6975e26 in 854ms, sequenceid=216, compaction requested=true 2024-12-08T11:22:34,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:34,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:34,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:34,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:34,646 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:34,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:34,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:34,646 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:34,646 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:34,646 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:34,646 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:34,647 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/A is initiating minor compaction (all files) 2024-12-08T11:22:34,647 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/B is initiating minor compaction (all files) 2024-12-08T11:22:34,647 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/A in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:34,647 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/B in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:34,647 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b9f43ec59b424acf9dec692dccb3f58a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5c8438cd11ef4eca9921138b68435c18, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/e42764116141487fb11c6b3cb0f17285] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=36.0 K 2024-12-08T11:22:34,647 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0d2842a630344f959aee8d2bb4632474, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/af966c44d20c45539d51d1c2d65f9c4b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/356436eaa2fd4fb0932b4a5db77f5290] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=100.0 K 2024-12-08T11:22:34,647 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:34,647 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0d2842a630344f959aee8d2bb4632474, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/af966c44d20c45539d51d1c2d65f9c4b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/356436eaa2fd4fb0932b4a5db77f5290] 2024-12-08T11:22:34,647 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b9f43ec59b424acf9dec692dccb3f58a, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733656949954 2024-12-08T11:22:34,647 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0d2842a630344f959aee8d2bb4632474, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733656949954 2024-12-08T11:22:34,648 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c8438cd11ef4eca9921138b68435c18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733656951024 2024-12-08T11:22:34,648 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting af966c44d20c45539d51d1c2d65f9c4b, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733656951024 2024-12-08T11:22:34,648 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting e42764116141487fb11c6b3cb0f17285, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733656953168 2024-12-08T11:22:34,648 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 356436eaa2fd4fb0932b4a5db77f5290, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733656953168 2024-12-08T11:22:34,653 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:34,654 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#B#compaction#563 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:34,655 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/705cdb22a7d24b6c9aa81fad1e5c162f is 50, key is test_row_0/B:col10/1733656953168/Put/seqid=0 2024-12-08T11:22:34,656 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024120806967430e04c485bb47830c7c8916b02_b64ae42323ee772e9f6012e0f6975e26 store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:34,658 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024120806967430e04c485bb47830c7c8916b02_b64ae42323ee772e9f6012e0f6975e26, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:34,658 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120806967430e04c485bb47830c7c8916b02_b64ae42323ee772e9f6012e0f6975e26 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:34,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742486_1662 (size=12663) 2024-12-08T11:22:34,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742487_1663 (size=4469) 2024-12-08T11:22:34,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:34,946 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-08T11:22:34,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:34,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:34,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:34,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:34,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:34,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:34,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120803a6d437899048b686ed45b2602d29e4_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656953826/Put/seqid=0 2024-12-08T11:22:34,956 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742488_1664 (size=14794) 2024-12-08T11:22:34,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:34,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657014987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-12-08T11:22:35,043 INFO [Thread-2647 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-12-08T11:22:35,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-08T11:22:35,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-12-08T11:22:35,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T11:22:35,045 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-08T11:22:35,046 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-08T11:22:35,046 INFO 
[PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-08T11:22:35,070 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/705cdb22a7d24b6c9aa81fad1e5c162f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/705cdb22a7d24b6c9aa81fad1e5c162f 2024-12-08T11:22:35,070 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#A#compaction#564 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:35,071 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/fb3dffe77e45412190f57921b5aee6d5 is 175, key is test_row_0/A:col10/1733656953168/Put/seqid=0 2024-12-08T11:22:35,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742489_1665 (size=31617) 2024-12-08T11:22:35,075 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/B of b64ae42323ee772e9f6012e0f6975e26 into 705cdb22a7d24b6c9aa81fad1e5c162f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:35,075 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:35,075 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/B, priority=13, startTime=1733656954646; duration=0sec 2024-12-08T11:22:35,076 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:35,076 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:B 2024-12-08T11:22:35,076 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-08T11:22:35,077 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-08T11:22:35,077 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/C is initiating minor compaction (all files) 2024-12-08T11:22:35,077 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/C in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,077 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/eaa5e66180ad4127adc1c13569549c51, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/940425bff65847d3ba48c1ab353622eb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/7c38130299f648f7bf3b29ebbade1ba3] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=36.0 K 2024-12-08T11:22:35,077 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting eaa5e66180ad4127adc1c13569549c51, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1733656949954 2024-12-08T11:22:35,077 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 940425bff65847d3ba48c1ab353622eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1733656951024 2024-12-08T11:22:35,078 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c38130299f648f7bf3b29ebbade1ba3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733656953168 2024-12-08T11:22:35,082 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
b64ae42323ee772e9f6012e0f6975e26#C#compaction#566 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:35,083 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/c5afaf35cf8f4fd5ac25db898db72d33 is 50, key is test_row_0/C:col10/1733656953168/Put/seqid=0 2024-12-08T11:22:35,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742490_1666 (size=12663) 2024-12-08T11:22:35,092 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:35,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657015090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T11:22:35,197 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-08T11:22:35,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:35,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:35,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,198 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:35,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:35,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657015294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T11:22:35,350 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-08T11:22:35,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:35,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:35,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,356 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:35,360 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120803a6d437899048b686ed45b2602d29e4_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120803a6d437899048b686ed45b2602d29e4_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:35,360 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/75f1eb9efba1425b975acaeb3cf96bca, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:35,361 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/75f1eb9efba1425b975acaeb3cf96bca is 175, key is test_row_0/A:col10/1733656953826/Put/seqid=0 2024-12-08T11:22:35,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742491_1667 (size=39749) 2024-12-08T11:22:35,365 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/75f1eb9efba1425b975acaeb3cf96bca 2024-12-08T11:22:35,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/ed83d7c325a0476db85bff5e69151389 is 50, key is test_row_0/B:col10/1733656953826/Put/seqid=0 2024-12-08T11:22:35,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742492_1668 (size=12151) 2024-12-08T11:22:35,479 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/fb3dffe77e45412190f57921b5aee6d5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/fb3dffe77e45412190f57921b5aee6d5 2024-12-08T11:22:35,483 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/A of b64ae42323ee772e9f6012e0f6975e26 into 
fb3dffe77e45412190f57921b5aee6d5(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:35,483 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:35,483 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/A, priority=13, startTime=1733656954646; duration=0sec 2024-12-08T11:22:35,483 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:35,483 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:A 2024-12-08T11:22:35,490 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/c5afaf35cf8f4fd5ac25db898db72d33 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/c5afaf35cf8f4fd5ac25db898db72d33 2024-12-08T11:22:35,503 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-08T11:22:35,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:35,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,508 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/C of b64ae42323ee772e9f6012e0f6975e26 into c5afaf35cf8f4fd5ac25db898db72d33(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:35,508 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:35,508 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/C, priority=13, startTime=1733656954646; duration=0sec 2024-12-08T11:22:35,508 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:35,508 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:C 2024-12-08T11:22:35,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:35,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657015599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T11:22:35,656 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,656 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-08T11:22:35,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:35,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,657 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:35,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,777 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/ed83d7c325a0476db85bff5e69151389 2024-12-08T11:22:35,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/f064fc2100e845ab910abf0ce2d24978 is 50, key is test_row_0/C:col10/1733656953826/Put/seqid=0 2024-12-08T11:22:35,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742493_1669 (size=12151) 2024-12-08T11:22:35,809 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-08T11:22:35,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:35,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,809 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:35,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,961 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:35,961 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-08T11:22:35,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:35,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:35,962 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:35,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:36,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:36,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657016101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:36,114 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:36,114 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-08T11:22:36,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:36,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:36,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:36,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:36,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:36,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:36,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-12-08T11:22:36,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/f064fc2100e845ab910abf0ce2d24978 2024-12-08T11:22:36,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/75f1eb9efba1425b975acaeb3cf96bca as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/75f1eb9efba1425b975acaeb3cf96bca 2024-12-08T11:22:36,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/75f1eb9efba1425b975acaeb3cf96bca, entries=200, sequenceid=235, filesize=38.8 K 2024-12-08T11:22:36,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/ed83d7c325a0476db85bff5e69151389 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/ed83d7c325a0476db85bff5e69151389 2024-12-08T11:22:36,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/ed83d7c325a0476db85bff5e69151389, entries=150, sequenceid=235, filesize=11.9 K 2024-12-08T11:22:36,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/f064fc2100e845ab910abf0ce2d24978 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/f064fc2100e845ab910abf0ce2d24978 2024-12-08T11:22:36,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/f064fc2100e845ab910abf0ce2d24978, entries=150, sequenceid=235, filesize=11.9 K 2024-12-08T11:22:36,204 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for b64ae42323ee772e9f6012e0f6975e26 in 1258ms, sequenceid=235, compaction requested=false 2024-12-08T11:22:36,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:36,267 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:36,267 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-12-08T11:22:36,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:36,268 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-08T11:22:36,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:36,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:36,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:36,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:36,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:36,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:36,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120885ac977533ef4f0491418d413319591d_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656954956/Put/seqid=0 2024-12-08T11:22:36,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742494_1670 (size=12304) 2024-12-08T11:22:36,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,283 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024120885ac977533ef4f0491418d413319591d_b64ae42323ee772e9f6012e0f6975e26 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120885ac977533ef4f0491418d413319591d_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:36,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/721f186c9bad4cc5b009762bd16b4e89, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:36,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/721f186c9bad4cc5b009762bd16b4e89 is 175, key is test_row_0/A:col10/1733656954956/Put/seqid=0 2024-12-08T11:22:36,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742495_1671 (size=31105) 2024-12-08T11:22:36,289 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/721f186c9bad4cc5b009762bd16b4e89 2024-12-08T11:22:36,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b6dc446d97504068b70d7efb3063cb26 is 50, key is test_row_0/B:col10/1733656954956/Put/seqid=0 2024-12-08T11:22:36,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742496_1672 (size=12151) 2024-12-08T11:22:36,304 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b6dc446d97504068b70d7efb3063cb26 2024-12-08T11:22:36,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/2973f9f5665549d786930f9b7c32fd4a is 50, key is test_row_0/C:col10/1733656954956/Put/seqid=0 2024-12-08T11:22:36,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742497_1673 (size=12151) 2024-12-08T11:22:36,314 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB 
at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/2973f9f5665549d786930f9b7c32fd4a
2024-12-08T11:22:36,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/721f186c9bad4cc5b009762bd16b4e89 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/721f186c9bad4cc5b009762bd16b4e89
2024-12-08T11:22:36,321 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/721f186c9bad4cc5b009762bd16b4e89, entries=150, sequenceid=255, filesize=30.4 K
2024-12-08T11:22:36,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/b6dc446d97504068b70d7efb3063cb26 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b6dc446d97504068b70d7efb3063cb26
2024-12-08T11:22:36,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:36,325 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b6dc446d97504068b70d7efb3063cb26, entries=150, sequenceid=255, filesize=11.9 K
2024-12-08T11:22:36,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/2973f9f5665549d786930f9b7c32fd4a as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2973f9f5665549d786930f9b7c32fd4a
2024-12-08T11:22:36,329 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2973f9f5665549d786930f9b7c32fd4a, entries=150, sequenceid=255, filesize=11.9 K
2024-12-08T11:22:36,329 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=0 B/0 for b64ae42323ee772e9f6012e0f6975e26 in 62ms, sequenceid=255, compaction requested=true
2024-12-08T11:22:36,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26:
2024-12-08T11:22:36,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.
2024-12-08T11:22:36,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178
2024-12-08T11:22:36,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=178
2024-12-08T11:22:36,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177
2024-12-08T11:22:36,331 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2840 sec
2024-12-08T11:22:36,332 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 1.2880 sec
2024-12-08T11:22:36,382 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,385 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,388 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,390 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,393 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,396 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,399 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,402 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,405 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,408 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,411 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,415 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,418 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,422 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,425 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,428 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,430 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,433 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,435 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,438 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,440 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,443 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,447 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,450 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,453 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,456 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,459 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,462 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,464 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,467 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,469 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... duplicate log entries omitted: this identical DEBUG entry repeats continuously from 2024-12-08T11:22:36,470 through 2024-12-08T11:22:36,529, emitted in turn by the RpcServer.default.FPBQ.Fifo handler threads 0, 1 and 2 (queue=0, port=46083); the omitted run is followed by an entry stamped 2024-12-08T11:22:36,530 DEBUG that continues below ...]
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,533 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,536 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,539 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,542 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,545 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,548 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,551 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,554 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,557 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,559 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,562 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,565 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,568 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,571 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,574 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,577 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,581 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,584 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,586 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,589 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,592 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,596 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,599 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,602 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,605 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,608 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,611 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,613 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,617 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,622 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical storefiletracker.StoreFileTrackerFactory(122) DEBUG entries ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") from RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 (queue=0, port=46083) repeat continuously between 2024-12-08T11:22:36,623 and 2024-12-08T11:22:36,698 ...]
2024-12-08T11:22:36,698 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,701 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,704 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,707 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,711 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,714 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,717 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,720 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,723 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,727 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,731 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,734 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,737 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,740 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,796 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,800 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,803 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,807 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,810 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,812 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,814 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,818 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,822 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,824 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,827 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,829 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,831 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,834 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,837 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,840 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,843 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,846 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,849 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,853 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,856 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,859 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,861 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,865 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,868 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,872 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,875 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,878 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,881 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,884 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,887 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (same DEBUG entry repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 from 2024-12-08T11:22:36,888 through 2024-12-08T11:22:36,954) 2024-12-08T11:22:36,954 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,960 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,964 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,967 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,970 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,973 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,976 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,978 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,982 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,986 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,990 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,992 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,995 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:36,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,000 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... identical StoreFileTrackerFactory(122) DEBUG entries repeated by RpcServer.default.FPBQ.Fifo handlers 0-2 (port 46083) from 2024-12-08T11:22:37,000 through 2024-12-08T11:22:37,056 ...]
2024-12-08T11:22:37,056 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,058 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,061 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,063 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,065 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,068 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,071 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,075 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,078 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,081 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,083 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,087 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,090 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,092 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,095 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,098 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,101 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,104 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,106 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,109 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,112 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,114 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,118 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,121 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,124 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,127 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,130 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,134 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,136 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,139 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,142 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,144 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,147 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177
2024-12-08T11:22:37,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,149 INFO [Thread-2647 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed
2024-12-08T11:22:37,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-08T11:22:37,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees
2024-12-08T11:22:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,152 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-08T11:22:37,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179
2024-12-08T11:22:37,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,153 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T11:22:37,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,153 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T11:22:37,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-08T11:22:37,156 DEBUG
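Note: the interleaved master and PEWorker entries above trace a client-requested flush of default:TestAcidGuarantees: procedure pid=177 completes, the jenkins client asks for another flush, the master stores FlushTableProcedure pid=179, runs FLUSH_TABLE_PREPARE and FLUSH_TABLE_FLUSH_REGIONS, and spawns a FlushRegionProcedure subprocedure (pid=180) while the client polls "Checking to see if procedure is done". A minimal client-side sketch of issuing such a flush (assumed for illustration; the actual test code is not part of this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table; the master runs a FlushTableProcedure
      // and the client waits for it, which is what the "Checking to see if
      // procedure is done pid=..." polling in the log corresponds to.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}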
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,160 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,164 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,167 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,169 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,172 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,174 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,177 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,180 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,184 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,187 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,190 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,193 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,197 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,201 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,204 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,208 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,210 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,213 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,216 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,219 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,228 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:37,229 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-08T11:22:37,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:37,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:37,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:37,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:37,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:37,229 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:37,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,234 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d00a70d52f1a43db84e2ad2315db5b9b_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656957198/Put/seqid=0 2024-12-08T11:22:37,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T11:22:37,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742498_1674 (size=20074) 2024-12-08T11:22:37,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:37,305 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:37,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:37,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
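The storefiletracker.StoreFileTrackerFactory lines above record the region server repeatedly instantiating DefaultStoreFileTracker for every store it touches during this test. A minimal sketch of how that tracker choice is driven by configuration, assuming the hbase.store.file-tracker.impl key and the standard HBase 2.x Admin/TableDescriptor client API (the modify-table call and the reuse of the TestAcidGuarantees table name below are illustrative, not taken from this run):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class StoreFileTrackerConfig {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Cluster-wide default: which StoreFileTracker implementation stores use.
        // "DEFAULT" names the built-in DefaultStoreFileTracker seen in the log above.
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Per-table override: copy the existing descriptor and set the same key on it.
          TableName table = TableName.valueOf("TestAcidGuarantees");
          TableDescriptor current = admin.getDescriptor(table);
          TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
              .setValue("hbase.store.file-tracker.impl", "DEFAULT")
              .build();
          admin.modifyTable(updated); // goes through the normal modify-table procedure
        }
      }
    }

When neither the table descriptor nor the column family sets the key, the factory falls back to the cluster default, which is consistent with every store in this run resolving to DefaultStoreFileTracker.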
2024-12-08T11:22:37,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:37,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657017306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:37,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:37,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657017411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:37,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T11:22:37,457 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:37,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:37,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:37,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,458 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
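The RegionTooBusyException entries show HRegion.checkResources rejecting mutations once the memstore of b64ae42323ee772e9f6012e0f6975e26 exceeds the 512.0 K blocking limit while the requested flush is still in flight. A minimal client-side sketch of backing off and retrying such a put, assuming the standard HBase 2.x client API; the table, row, column and retry parameters are illustrative, and depending on client retry settings the busy-region failure may surface wrapped in a retries-exhausted exception, so the cause chain is inspected rather than the top-level type:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {

      // True if any exception in the cause chain is RegionTooBusyException.
      static boolean regionTooBusy(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
          if (cur instanceof RegionTooBusyException) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put); // the client also retries internally before surfacing a failure
              break;
            } catch (IOException e) {
              if (!regionTooBusy(e) || attempt >= 5) {
                throw e; // not a busy-region failure, or out of attempts
              }
              Thread.sleep(backoffMs); // give the in-flight flush time to drain the memstore
              backoffMs *= 2;
            }
          }
        }
      }
    }

The backoff simply waits out the condition the log documents: once MemStoreFlusher.0 finishes flushing the A/B/C stores, checkResources stops rejecting writes and the retried put goes through.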
2024-12-08T11:22:37,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,610 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:37,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:37,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:37,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:37,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657017616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:37,662 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:37,666 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d00a70d52f1a43db84e2ad2315db5b9b_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d00a70d52f1a43db84e2ad2315db5b9b_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:37,666 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8926c34891984853a9edbd0111cf8701, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:37,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8926c34891984853a9edbd0111cf8701 is 175, key is test_row_0/A:col10/1733656957198/Put/seqid=0 2024-12-08T11:22:37,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742499_1675 (size=57333) 2024-12-08T11:22:37,670 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=266, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8926c34891984853a9edbd0111cf8701 2024-12-08T11:22:37,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5d658c440e7e4a0aa87b0957a6bc6177 is 50, key is 
test_row_0/B:col10/1733656957198/Put/seqid=0 2024-12-08T11:22:37,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742500_1676 (size=12301) 2024-12-08T11:22:37,679 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5d658c440e7e4a0aa87b0957a6bc6177 2024-12-08T11:22:37,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/285a8089ec1e4b009b58412cd7fd39a2 is 50, key is test_row_0/C:col10/1733656957198/Put/seqid=0 2024-12-08T11:22:37,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742501_1677 (size=12301) 2024-12-08T11:22:37,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T11:22:37,762 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:37,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:37,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:37,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,915 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:37,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:37,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:37,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:37,916 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:37,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:37,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657017922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,068 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:38,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:38,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,089 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=266 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/285a8089ec1e4b009b58412cd7fd39a2 2024-12-08T11:22:38,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/8926c34891984853a9edbd0111cf8701 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8926c34891984853a9edbd0111cf8701 2024-12-08T11:22:38,095 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8926c34891984853a9edbd0111cf8701, entries=300, sequenceid=266, filesize=56.0 K 2024-12-08T11:22:38,095 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/5d658c440e7e4a0aa87b0957a6bc6177 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5d658c440e7e4a0aa87b0957a6bc6177 2024-12-08T11:22:38,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5d658c440e7e4a0aa87b0957a6bc6177, entries=150, sequenceid=266, filesize=12.0 K 2024-12-08T11:22:38,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/285a8089ec1e4b009b58412cd7fd39a2 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/285a8089ec1e4b009b58412cd7fd39a2 2024-12-08T11:22:38,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46220 deadline: 1733657018099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,102 DEBUG [Thread-2639 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:22:38,102 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46252 deadline: 1733657018100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,103 DEBUG [Thread-2643 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., hostname=355ef6e50110,46083,1733656795491, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-08T11:22:38,105 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/285a8089ec1e4b009b58412cd7fd39a2, entries=150, sequenceid=266, filesize=12.0 K 2024-12-08T11:22:38,106 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b64ae42323ee772e9f6012e0f6975e26 in 877ms, sequenceid=266, compaction requested=true 2024-12-08T11:22:38,106 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:38,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:38,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:38,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:38,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:38,106 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:38,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:38,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:38,106 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:38,107 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 159804 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:38,107 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:38,107 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/B is initiating minor compaction (all files) 2024-12-08T11:22:38,107 DEBUG 
[RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/A is initiating minor compaction (all files) 2024-12-08T11:22:38,107 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/B in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,107 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/A in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,107 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/705cdb22a7d24b6c9aa81fad1e5c162f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/ed83d7c325a0476db85bff5e69151389, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b6dc446d97504068b70d7efb3063cb26, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5d658c440e7e4a0aa87b0957a6bc6177] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=48.1 K 2024-12-08T11:22:38,107 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/fb3dffe77e45412190f57921b5aee6d5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/75f1eb9efba1425b975acaeb3cf96bca, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/721f186c9bad4cc5b009762bd16b4e89, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8926c34891984853a9edbd0111cf8701] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=156.1 K 2024-12-08T11:22:38,107 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,107 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/fb3dffe77e45412190f57921b5aee6d5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/75f1eb9efba1425b975acaeb3cf96bca, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/721f186c9bad4cc5b009762bd16b4e89, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8926c34891984853a9edbd0111cf8701] 2024-12-08T11:22:38,108 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 705cdb22a7d24b6c9aa81fad1e5c162f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733656953168 2024-12-08T11:22:38,108 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb3dffe77e45412190f57921b5aee6d5, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733656953168 2024-12-08T11:22:38,108 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ed83d7c325a0476db85bff5e69151389, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733656953826 2024-12-08T11:22:38,108 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 75f1eb9efba1425b975acaeb3cf96bca, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733656953823 2024-12-08T11:22:38,108 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting b6dc446d97504068b70d7efb3063cb26, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733656954956 2024-12-08T11:22:38,108 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 721f186c9bad4cc5b009762bd16b4e89, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733656954956 2024-12-08T11:22:38,108 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d658c440e7e4a0aa87b0957a6bc6177, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733656957198 2024-12-08T11:22:38,108 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8926c34891984853a9edbd0111cf8701, keycount=300, bloomtype=ROW, size=56.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733656957183 2024-12-08T11:22:38,114 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:38,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:38,115 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T11:22:38,115 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:38,115 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#B#compaction#575 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:38,116 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:38,116 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/7c9c903e93294b9ebde34dbd15440e4d is 50, key is test_row_0/B:col10/1733656957198/Put/seqid=0 2024-12-08T11:22:38,117 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241208f8e1e682cd334b53befd91fc4707471a_b64ae42323ee772e9f6012e0f6975e26 store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:38,120 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241208f8e1e682cd334b53befd91fc4707471a_b64ae42323ee772e9f6012e0f6975e26, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:38,120 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208f8e1e682cd334b53befd91fc4707471a_b64ae42323ee772e9f6012e0f6975e26 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:38,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742502_1678 (size=12949) 2024-12-08T11:22:38,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742503_1679 (size=4469) 2024-12-08T11:22:38,126 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/7c9c903e93294b9ebde34dbd15440e4d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/7c9c903e93294b9ebde34dbd15440e4d 
2024-12-08T11:22:38,129 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/B of b64ae42323ee772e9f6012e0f6975e26 into 7c9c903e93294b9ebde34dbd15440e4d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:38,129 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:38,129 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/B, priority=12, startTime=1733656958106; duration=0sec 2024-12-08T11:22:38,129 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:38,130 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:B 2024-12-08T11:22:38,130 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:38,131 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:38,131 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/C is initiating minor compaction (all files) 2024-12-08T11:22:38,131 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/C in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:38,131 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/c5afaf35cf8f4fd5ac25db898db72d33, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/f064fc2100e845ab910abf0ce2d24978, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2973f9f5665549d786930f9b7c32fd4a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/285a8089ec1e4b009b58412cd7fd39a2] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=48.1 K 2024-12-08T11:22:38,131 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c5afaf35cf8f4fd5ac25db898db72d33, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1733656953168 2024-12-08T11:22:38,132 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting f064fc2100e845ab910abf0ce2d24978, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1733656953826 2024-12-08T11:22:38,132 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 2973f9f5665549d786930f9b7c32fd4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1733656954956 2024-12-08T11:22:38,132 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082c852e2ccb884a068d9b709e5c85530a_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656957304/Put/seqid=0 2024-12-08T11:22:38,132 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 285a8089ec1e4b009b58412cd7fd39a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733656957198 2024-12-08T11:22:38,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657018130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657018130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742504_1680 (size=14994) 2024-12-08T11:22:38,146 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#C#compaction#578 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:38,147 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/ee8f1762cfae4d35ba71295d45c570f4 is 50, key is test_row_0/C:col10/1733656957198/Put/seqid=0 2024-12-08T11:22:38,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742505_1681 (size=12949) 2024-12-08T11:22:38,153 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/ee8f1762cfae4d35ba71295d45c570f4 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/ee8f1762cfae4d35ba71295d45c570f4 2024-12-08T11:22:38,157 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/C of b64ae42323ee772e9f6012e0f6975e26 into ee8f1762cfae4d35ba71295d45c570f4(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-08T11:22:38,157 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:38,157 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/C, priority=12, startTime=1733656958106; duration=0sec 2024-12-08T11:22:38,157 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:38,157 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:C 2024-12-08T11:22:38,220 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:38,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:38,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:38,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657018236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657018236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-08T11:22:38,374 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,374 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:38,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:38,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,374 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:38,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,427 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657018426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657018438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657018438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,526 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#A#compaction#576 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:38,526 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,526 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/50a3f1c513bd4825aa7320de72faa79b is 175, key is test_row_0/A:col10/1733656957198/Put/seqid=0 2024-12-08T11:22:38,527 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:38,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:38,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,527 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:38,541 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:38,544 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082c852e2ccb884a068d9b709e5c85530a_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082c852e2ccb884a068d9b709e5c85530a_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:38,545 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/b6b7d902d7214454af216e8580af7668, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:38,545 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/b6b7d902d7214454af216e8580af7668 is 175, key is test_row_0/A:col10/1733656957304/Put/seqid=0 2024-12-08T11:22:38,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742506_1682 (size=31903) 2024-12-08T11:22:38,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742507_1683 (size=39949) 2024-12-08T11:22:38,549 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=292, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/b6b7d902d7214454af216e8580af7668 2024-12-08T11:22:38,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/c528d062a39e499b9506295e50a5589c is 50, key is test_row_0/B:col10/1733656957304/Put/seqid=0 2024-12-08T11:22:38,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742508_1684 (size=12301) 2024-12-08T11:22:38,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:38,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:38,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:38,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:38,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657018741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:38,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657018741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,840 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,840 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:38,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:38,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:38,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:38,840 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] handler.RSProcedureHandler(58): pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=180 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:38,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=180 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:38,951 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/50a3f1c513bd4825aa7320de72faa79b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/50a3f1c513bd4825aa7320de72faa79b 2024-12-08T11:22:38,955 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/A of b64ae42323ee772e9f6012e0f6975e26 into 50a3f1c513bd4825aa7320de72faa79b(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:38,955 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:38,955 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/A, priority=12, startTime=1733656958106; duration=0sec 2024-12-08T11:22:38,955 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:38,955 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:A 2024-12-08T11:22:38,959 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/c528d062a39e499b9506295e50a5589c 2024-12-08T11:22:38,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/81fd3de7130840f79367f7cb97ce1d0c is 50, key is test_row_0/C:col10/1733656957304/Put/seqid=0 2024-12-08T11:22:38,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742509_1685 (size=12301) 2024-12-08T11:22:38,968 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/81fd3de7130840f79367f7cb97ce1d0c 2024-12-08T11:22:38,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/b6b7d902d7214454af216e8580af7668 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/b6b7d902d7214454af216e8580af7668 2024-12-08T11:22:38,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/b6b7d902d7214454af216e8580af7668, entries=200, sequenceid=292, filesize=39.0 K 2024-12-08T11:22:38,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/c528d062a39e499b9506295e50a5589c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c528d062a39e499b9506295e50a5589c 2024-12-08T11:22:38,980 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c528d062a39e499b9506295e50a5589c, entries=150, sequenceid=292, filesize=12.0 K 2024-12-08T11:22:38,980 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/81fd3de7130840f79367f7cb97ce1d0c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/81fd3de7130840f79367f7cb97ce1d0c 2024-12-08T11:22:38,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/81fd3de7130840f79367f7cb97ce1d0c, entries=150, sequenceid=292, filesize=12.0 K 2024-12-08T11:22:38,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for b64ae42323ee772e9f6012e0f6975e26 in 869ms, sequenceid=292, compaction requested=false 2024-12-08T11:22:38,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:38,993 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:38,994 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-12-08T11:22:38,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:38,994 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-08T11:22:38,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:38,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:38,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:38,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:38,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:38,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:38,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208bc2fc2317b2c4c77a4253fc65e499204_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656958128/Put/seqid=0 2024-12-08T11:22:39,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742510_1686 (size=12454) 2024-12-08T11:22:39,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,006 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208bc2fc2317b2c4c77a4253fc65e499204_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208bc2fc2317b2c4c77a4253fc65e499204_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:39,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/474eddd4a65f43a1910acc985cb9269d, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:39,007 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/474eddd4a65f43a1910acc985cb9269d is 175, key is test_row_0/A:col10/1733656958128/Put/seqid=0 2024-12-08T11:22:39,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742511_1687 (size=31255) 2024-12-08T11:22:39,010 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=305, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/474eddd4a65f43a1910acc985cb9269d 2024-12-08T11:22:39,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/a917617daa6448df93c046cef39e0705 is 50, key is test_row_0/B:col10/1733656958128/Put/seqid=0 2024-12-08T11:22:39,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742512_1688 (size=12301) 2024-12-08T11:22:39,026 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/a917617daa6448df93c046cef39e0705 2024-12-08T11:22:39,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/3c86c911bee14d0dbd7e8631a931381d is 50, key is test_row_0/C:col10/1733656958128/Put/seqid=0 2024-12-08T11:22:39,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742513_1689 (size=12301) 2024-12-08T11:22:39,036 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/3c86c911bee14d0dbd7e8631a931381d 2024-12-08T11:22:39,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/474eddd4a65f43a1910acc985cb9269d as 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/474eddd4a65f43a1910acc985cb9269d 2024-12-08T11:22:39,043 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/474eddd4a65f43a1910acc985cb9269d, entries=150, sequenceid=305, filesize=30.5 K 2024-12-08T11:22:39,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/a917617daa6448df93c046cef39e0705 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a917617daa6448df93c046cef39e0705 2024-12-08T11:22:39,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,046 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a917617daa6448df93c046cef39e0705, entries=150, sequenceid=305, filesize=12.0 K 2024-12-08T11:22:39,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/3c86c911bee14d0dbd7e8631a931381d as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c86c911bee14d0dbd7e8631a931381d 2024-12-08T11:22:39,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,051 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c86c911bee14d0dbd7e8631a931381d, entries=150, sequenceid=305, filesize=12.0 K 2024-12-08T11:22:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,051 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for b64ae42323ee772e9f6012e0f6975e26 in 57ms, sequenceid=305, compaction requested=true 2024-12-08T11:22:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 
2024-12-08T11:22:39,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-12-08T11:22:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-12-08T11:22:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 
2024-12-08T11:22:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9000 sec 2024-12-08T11:22:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,055 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 1.9030 sec 2024-12-08T11:22:39,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [this identical DEBUG entry repeats continuously from 2024-12-08T11:22:39,172 through 2024-12-08T11:22:39,236 across RpcServer.default.FPBQ.Fifo handler threads 0, 1, and 2 on port 46083; duplicate occurrences elided] 2024-12-08T11:22:39,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] 
2024-12-08T11:22:39,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179
2024-12-08T11:22:39,256 INFO [Thread-2647 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed
2024-12-08T11:22:39,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-08T11:22:39,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees
2024-12-08T11:22:39,259 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-08T11:22:39,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181
2024-12-08T11:22:39,260 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-08T11:22:39,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-12-08T11:22:39,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): 
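The DEBUG entry above is logged by storefiletracker.StoreFileTrackerFactory(122) each time a tracker is created for a store; with no table- or cluster-level override the factory falls back to DefaultStoreFileTracker, which is why the RPC handlers repeat the same message for every request. As a hedged illustration only (the property name "hbase.store.file-tracker.impl" and the value "DEFAULT" are assumptions based on recent HBase releases, not taken from this log), the tracker implementation can be pinned through configuration roughly like this:

    // Hypothetical sketch: pinning the store file tracker that
    // StoreFileTrackerFactory resolves. Property name and value are
    // assumptions, not confirmed by this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TrackerConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Selects the default tracker (store files tracked via tmp-dir rename).
            conf.set("hbase.store.file-tracker.impl", "DEFAULT");
            System.out.println(conf.get("hbase.store.file-tracker.impl"));
        }
    }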
2024-12-08T11:22:39,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26
2024-12-08T11:22:39,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-12-08T11:22:39,339 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A
2024-12-08T11:22:39,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:22:39,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B
2024-12-08T11:22:39,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:22:39,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C
2024-12-08T11:22:39,340 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-08T11:22:39,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c5be5feca38a467f86b1161d11efe085_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656959329/Put/seqid=0
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,353 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742514_1690 (size=17534) 2024-12-08T11:22:39,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,358 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-08T11:22:39,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,364 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208c5be5feca38a467f86b1161d11efe085_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c5be5feca38a467f86b1161d11efe085_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:39,365 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/c917990dd1b44f298eb5149442a8fb11, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:39,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,366 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/c917990dd1b44f298eb5149442a8fb11 is 175, key is test_row_0/A:col10/1733656959329/Put/seqid=0 2024-12-08T11:22:39,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742515_1691 (size=48635) 2024-12-08T11:22:39,385 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=316, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/c917990dd1b44f298eb5149442a8fb11 2024-12-08T11:22:39,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/715a83ca2c264850a8bed65b78472f3c is 50, key is test_row_0/B:col10/1733656959329/Put/seqid=0 2024-12-08T11:22:39,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:39,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46198 deadline: 1733657019400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:39,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:39,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46188 deadline: 1733657019403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:39,411 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:39,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:39,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:39,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,412 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:39,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742516_1692 (size=9857) 2024-12-08T11:22:39,424 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/715a83ca2c264850a8bed65b78472f3c 2024-12-08T11:22:39,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-08T11:22:39,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=46083 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46192 deadline: 1733657019430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 2024-12-08T11:22:39,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/434b7f5998754a8caa628cd4e15dd371 is 50, key is test_row_0/C:col10/1733656959329/Put/seqid=0 2024-12-08T11:22:39,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742517_1693 (size=9857) 2024-12-08T11:22:39,442 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/434b7f5998754a8caa628cd4e15dd371 
2024-12-08T11:22:39,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/c917990dd1b44f298eb5149442a8fb11 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/c917990dd1b44f298eb5149442a8fb11 2024-12-08T11:22:39,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/c917990dd1b44f298eb5149442a8fb11, entries=250, sequenceid=316, filesize=47.5 K 2024-12-08T11:22:39,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/715a83ca2c264850a8bed65b78472f3c as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/715a83ca2c264850a8bed65b78472f3c 2024-12-08T11:22:39,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/715a83ca2c264850a8bed65b78472f3c, entries=100, sequenceid=316, filesize=9.6 K 2024-12-08T11:22:39,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/434b7f5998754a8caa628cd4e15dd371 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/434b7f5998754a8caa628cd4e15dd371 2024-12-08T11:22:39,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/434b7f5998754a8caa628cd4e15dd371, entries=100, sequenceid=316, filesize=9.6 K 2024-12-08T11:22:39,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for b64ae42323ee772e9f6012e0f6975e26 in 128ms, sequenceid=316, compaction requested=true 2024-12-08T11:22:39,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:39,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:A, priority=-2147483648, current under compaction store size is 1 2024-12-08T11:22:39,467 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:39,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:39,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark 
for store b64ae42323ee772e9f6012e0f6975e26:B, priority=-2147483648, current under compaction store size is 2 2024-12-08T11:22:39,467 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:39,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:39,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store b64ae42323ee772e9f6012e0f6975e26:C, priority=-2147483648, current under compaction store size is 3 2024-12-08T11:22:39,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:39,468 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 151742 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:39,468 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/A is initiating minor compaction (all files) 2024-12-08T11:22:39,468 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/A in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,468 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/50a3f1c513bd4825aa7320de72faa79b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/b6b7d902d7214454af216e8580af7668, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/474eddd4a65f43a1910acc985cb9269d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/c917990dd1b44f298eb5149442a8fb11] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=148.2 K 2024-12-08T11:22:39,468 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,468 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
files: [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/50a3f1c513bd4825aa7320de72faa79b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/b6b7d902d7214454af216e8580af7668, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/474eddd4a65f43a1910acc985cb9269d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/c917990dd1b44f298eb5149442a8fb11] 2024-12-08T11:22:39,469 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47408 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:39,469 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/B is initiating minor compaction (all files) 2024-12-08T11:22:39,469 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/B in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,469 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/7c9c903e93294b9ebde34dbd15440e4d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c528d062a39e499b9506295e50a5589c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a917617daa6448df93c046cef39e0705, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/715a83ca2c264850a8bed65b78472f3c] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=46.3 K 2024-12-08T11:22:39,469 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50a3f1c513bd4825aa7320de72faa79b, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733656957198 2024-12-08T11:22:39,470 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c9c903e93294b9ebde34dbd15440e4d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733656957198 2024-12-08T11:22:39,470 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6b7d902d7214454af216e8580af7668, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733656957273 2024-12-08T11:22:39,470 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting c528d062a39e499b9506295e50a5589c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733656957273 2024-12-08T11:22:39,470 
DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting 474eddd4a65f43a1910acc985cb9269d, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733656958124 2024-12-08T11:22:39,470 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting a917617daa6448df93c046cef39e0705, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733656958124 2024-12-08T11:22:39,471 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] compactions.Compactor(224): Compacting c917990dd1b44f298eb5149442a8fb11, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733656959318 2024-12-08T11:22:39,471 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 715a83ca2c264850a8bed65b78472f3c, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733656959329 2024-12-08T11:22:39,486 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#B#compaction#587 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:39,487 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/bbe116f67f8e49f586bb65e6c390836b is 50, key is test_row_0/B:col10/1733656959329/Put/seqid=0 2024-12-08T11:22:39,492 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:39,504 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412082fcd62e75f6c4a7d89c1e3789804015f_b64ae42323ee772e9f6012e0f6975e26 store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:39,506 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412082fcd62e75f6c4a7d89c1e3789804015f_b64ae42323ee772e9f6012e0f6975e26, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:39,507 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412082fcd62e75f6c4a7d89c1e3789804015f_b64ae42323ee772e9f6012e0f6975e26 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:39,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-08T11:22:39,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:39,507 DEBUG [Thread-2656 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e8cd1ae to 127.0.0.1:63801 2024-12-08T11:22:39,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:39,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:39,507 DEBUG [Thread-2656 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:39,507 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:39,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:39,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:39,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46083 {}] regionserver.HRegion(8581): Flush requested on b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:39,508 DEBUG [Thread-2645 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d7fe431 to 127.0.0.1:63801 2024-12-08T11:22:39,508 DEBUG [Thread-2645 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:39,508 DEBUG [Thread-2654 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58460ef3 to 127.0.0.1:63801 2024-12-08T11:22:39,509 DEBUG [Thread-2654 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:39,509 DEBUG [Thread-2648 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58971172 to 127.0.0.1:63801 2024-12-08T11:22:39,509 DEBUG [Thread-2648 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:39,509 DEBUG [Thread-2652 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11c440f7 to 127.0.0.1:63801 2024-12-08T11:22:39,509 DEBUG [Thread-2652 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:39,510 DEBUG [Thread-2650 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d7fe93b to 127.0.0.1:63801 2024-12-08T11:22:39,510 DEBUG [Thread-2650 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:39,511 DEBUG [Thread-2641 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b5141 to 127.0.0.1:63801 2024-12-08T11:22:39,511 DEBUG [Thread-2641 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:39,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742518_1694 (size=13085) 2024-12-08T11:22:39,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742519_1695 (size=4469) 2024-12-08T11:22:39,520 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b02a2ae4143640bbb1ca8180fdc52967_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656959388/Put/seqid=0 2024-12-08T11:22:39,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742520_1696 (size=12454) 2024-12-08T11:22:39,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 
2024-12-08T11:22:39,570 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:39,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:39,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:39,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:39,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,722 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:39,722 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:39,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
as already flushing 2024-12-08T11:22:39,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-08T11:22:39,874 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:39,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:39,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:39,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:39,916 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/bbe116f67f8e49f586bb65e6c390836b as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/bbe116f67f8e49f586bb65e6c390836b 2024-12-08T11:22:39,918 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#A#compaction#588 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:39,918 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/4f4d83ca58be4fdebf3e9290204426ef is 175, key is test_row_0/A:col10/1733656959329/Put/seqid=0 2024-12-08T11:22:39,920 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/B of b64ae42323ee772e9f6012e0f6975e26 into bbe116f67f8e49f586bb65e6c390836b(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:39,920 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:39,920 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/B, priority=12, startTime=1733656959467; duration=0sec 2024-12-08T11:22:39,920 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-08T11:22:39,920 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:B 2024-12-08T11:22:39,920 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-08T11:22:39,921 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47408 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-08T11:22:39,921 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1540): b64ae42323ee772e9f6012e0f6975e26/C is initiating minor compaction (all files) 2024-12-08T11:22:39,921 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of b64ae42323ee772e9f6012e0f6975e26/C in TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:39,921 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/ee8f1762cfae4d35ba71295d45c570f4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/81fd3de7130840f79367f7cb97ce1d0c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c86c911bee14d0dbd7e8631a931381d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/434b7f5998754a8caa628cd4e15dd371] into tmpdir=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp, totalSize=46.3 K 2024-12-08T11:22:39,921 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting ee8f1762cfae4d35ba71295d45c570f4, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=266, earliestPutTs=1733656957198 2024-12-08T11:22:39,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742521_1697 (size=32146) 2024-12-08T11:22:39,922 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 81fd3de7130840f79367f7cb97ce1d0c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1733656957273 2024-12-08T11:22:39,922 DEBUG 
[RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c86c911bee14d0dbd7e8631a931381d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1733656958124 2024-12-08T11:22:39,922 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] compactions.Compactor(224): Compacting 434b7f5998754a8caa628cd4e15dd371, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1733656959329 2024-12-08T11:22:39,925 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:39,927 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): b64ae42323ee772e9f6012e0f6975e26#C#compaction#590 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-08T11:22:39,927 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/a1200941c75a41589e53ef073797aafe is 50, key is test_row_0/C:col10/1733656959329/Put/seqid=0 2024-12-08T11:22:39,927 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208b02a2ae4143640bbb1ca8180fdc52967_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b02a2ae4143640bbb1ca8180fdc52967_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:39,928 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/12259f48168d4670b0522216244447c9, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:39,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/12259f48168d4670b0522216244447c9 is 175, key is test_row_0/A:col10/1733656959388/Put/seqid=0 2024-12-08T11:22:39,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742522_1698 (size=13085) 2024-12-08T11:22:39,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742523_1699 (size=31255) 2024-12-08T11:22:40,027 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:40,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:40,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 
{event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:40,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:40,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,179 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:40,180 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:40,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
as already flushing 2024-12-08T11:22:40,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,180 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,325 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/4f4d83ca58be4fdebf3e9290204426ef as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/4f4d83ca58be4fdebf3e9290204426ef 2024-12-08T11:22:40,328 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/A of b64ae42323ee772e9f6012e0f6975e26 into 4f4d83ca58be4fdebf3e9290204426ef(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:40,328 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:40,328 INFO [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/A, priority=12, startTime=1733656959467; duration=0sec 2024-12-08T11:22:40,328 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:40,328 DEBUG [RS:0;355ef6e50110:46083-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:A 2024-12-08T11:22:40,332 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:40,332 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=342, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/12259f48168d4670b0522216244447c9 2024-12-08T11:22:40,332 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:40,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:40,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,332 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,333 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/a1200941c75a41589e53ef073797aafe as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/a1200941c75a41589e53ef073797aafe 2024-12-08T11:22:40,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/8d40b1e5299841a2b2f89f07fd88f7c9 is 50, key is test_row_0/B:col10/1733656959388/Put/seqid=0 2024-12-08T11:22:40,338 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in b64ae42323ee772e9f6012e0f6975e26/C of b64ae42323ee772e9f6012e0f6975e26 into a1200941c75a41589e53ef073797aafe(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-08T11:22:40,339 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:40,339 INFO [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26., storeName=b64ae42323ee772e9f6012e0f6975e26/C, priority=12, startTime=1733656959467; duration=0sec 2024-12-08T11:22:40,339 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-08T11:22:40,339 DEBUG [RS:0;355ef6e50110:46083-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: b64ae42323ee772e9f6012e0f6975e26:C 2024-12-08T11:22:40,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742524_1700 (size=12301) 2024-12-08T11:22:40,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-08T11:22:40,484 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:40,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:40,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:40,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,485 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:40,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,637 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:40,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:40,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:40,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:40,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/8d40b1e5299841a2b2f89f07fd88f7c9 2024-12-08T11:22:40,746 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e7745cfc63814740b5a883db38695bc0 is 50, key is test_row_0/C:col10/1733656959388/Put/seqid=0 2024-12-08T11:22:40,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742525_1701 (size=12301) 2024-12-08T11:22:40,789 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:40,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:40,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:40,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,790 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:40,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,941 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:40,942 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:40,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:40,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:40,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:40,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:41,094 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:41,094 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-12-08T11:22:41,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:41,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. as already flushing 2024-12-08T11:22:41,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:41,094 ERROR [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-08T11:22:41,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:41,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-08T11:22:41,149 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e7745cfc63814740b5a883db38695bc0 2024-12-08T11:22:41,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/12259f48168d4670b0522216244447c9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/12259f48168d4670b0522216244447c9 2024-12-08T11:22:41,155 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/12259f48168d4670b0522216244447c9, entries=150, sequenceid=342, filesize=30.5 K 2024-12-08T11:22:41,155 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/8d40b1e5299841a2b2f89f07fd88f7c9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/8d40b1e5299841a2b2f89f07fd88f7c9 2024-12-08T11:22:41,157 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/8d40b1e5299841a2b2f89f07fd88f7c9, entries=150, sequenceid=342, filesize=12.0 K 2024-12-08T11:22:41,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e7745cfc63814740b5a883db38695bc0 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e7745cfc63814740b5a883db38695bc0 2024-12-08T11:22:41,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e7745cfc63814740b5a883db38695bc0, entries=150, sequenceid=342, filesize=12.0 K 2024-12-08T11:22:41,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=6.71 KB/6870 for b64ae42323ee772e9f6012e0f6975e26 in 1653ms, sequenceid=342, compaction requested=false 2024-12-08T11:22:41,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:41,246 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:41,246 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=46083 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=182 2024-12-08T11:22:41,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:41,246 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-12-08T11:22:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:41,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:41,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208002d4142894147f38260abc035c1bcd7_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_2/A:col10/1733656959508/Put/seqid=0 2024-12-08T11:22:41,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742526_1702 (size=7374) 2024-12-08T11:22:41,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-08T11:22:41,439 DEBUG [Thread-2637 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fb684eb to 127.0.0.1:63801 2024-12-08T11:22:41,439 DEBUG [Thread-2637 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:41,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:41,657 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208002d4142894147f38260abc035c1bcd7_b64ae42323ee772e9f6012e0f6975e26 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208002d4142894147f38260abc035c1bcd7_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:41,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/676175e4976c4568acba87da08aefbcd, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:41,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/676175e4976c4568acba87da08aefbcd is 175, key is test_row_2/A:col10/1733656959508/Put/seqid=0 2024-12-08T11:22:41,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742527_1703 (size=13865) 2024-12-08T11:22:42,062 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=349, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/676175e4976c4568acba87da08aefbcd 2024-12-08T11:22:42,067 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/d8ccf21dfe83423592107cdbb17786aa is 50, key is test_row_2/B:col10/1733656959508/Put/seqid=0 2024-12-08T11:22:42,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742528_1704 (size=7415) 2024-12-08T11:22:42,470 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=349 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/d8ccf21dfe83423592107cdbb17786aa 2024-12-08T11:22:42,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e85fd1fbd74249248efe86d4bc9848dd is 50, key is test_row_2/C:col10/1733656959508/Put/seqid=0 2024-12-08T11:22:42,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742529_1705 (size=7415) 2024-12-08T11:22:42,879 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at 
sequenceid=349 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e85fd1fbd74249248efe86d4bc9848dd 2024-12-08T11:22:42,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/676175e4976c4568acba87da08aefbcd as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/676175e4976c4568acba87da08aefbcd 2024-12-08T11:22:42,884 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/676175e4976c4568acba87da08aefbcd, entries=50, sequenceid=349, filesize=13.5 K 2024-12-08T11:22:42,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/d8ccf21dfe83423592107cdbb17786aa as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/d8ccf21dfe83423592107cdbb17786aa 2024-12-08T11:22:42,887 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/d8ccf21dfe83423592107cdbb17786aa, entries=50, sequenceid=349, filesize=7.2 K 2024-12-08T11:22:42,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/e85fd1fbd74249248efe86d4bc9848dd as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e85fd1fbd74249248efe86d4bc9848dd 2024-12-08T11:22:42,890 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e85fd1fbd74249248efe86d4bc9848dd, entries=50, sequenceid=349, filesize=7.2 K 2024-12-08T11:22:42,890 INFO [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=6.71 KB/6870 for b64ae42323ee772e9f6012e0f6975e26 in 1644ms, sequenceid=349, compaction requested=true 2024-12-08T11:22:42,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for 
b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:42,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:42,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/355ef6e50110:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-12-08T11:22:42,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-12-08T11:22:42,892 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-12-08T11:22:42,892 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6310 sec 2024-12-08T11:22:42,893 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 3.6340 sec 2024-12-08T11:22:43,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-12-08T11:22:43,364 INFO [Thread-2647 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-12-08T11:22:48,181 DEBUG [Thread-2643 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11a52cdf to 127.0.0.1:63801 2024-12-08T11:22:48,181 DEBUG [Thread-2643 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:48,200 DEBUG [Thread-2639 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0644b7e6 to 127.0.0.1:63801 2024-12-08T11:22:48,200 DEBUG [Thread-2639 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 105 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 35 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 31 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 49 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6432 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6027 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6105 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6429 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5997 2024-12-08T11:22:48,200 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-08T11:22:48,200 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T11:22:48,200 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2cbfd84f to 127.0.0.1:63801 2024-12-08T11:22:48,200 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:48,201 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-08T11:22:48,201 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-08T11:22:48,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:48,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T11:22:48,203 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656968203"}]},"ts":"1733656968203"} 2024-12-08T11:22:48,204 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-08T11:22:48,205 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-08T11:22:48,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-08T11:22:48,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=185, ppid=184, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, UNASSIGN}] 2024-12-08T11:22:48,207 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=185, ppid=184, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, UNASSIGN 2024-12-08T11:22:48,208 INFO [PEWorker-4 
{}] assignment.RegionStateStore(202): pid=185 updating hbase:meta row=b64ae42323ee772e9f6012e0f6975e26, regionState=CLOSING, regionLocation=355ef6e50110,46083,1733656795491 2024-12-08T11:22:48,208 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-08T11:22:48,208 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; CloseRegionProcedure b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491}] 2024-12-08T11:22:48,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T11:22:48,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 355ef6e50110,46083,1733656795491 2024-12-08T11:22:48,360 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] handler.UnassignRegionHandler(124): Close b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1681): Closing b64ae42323ee772e9f6012e0f6975e26, disabling compactions & flushes 2024-12-08T11:22:48,360 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. after waiting 0 ms 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:48,360 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(2837): Flushing b64ae42323ee772e9f6012e0f6975e26 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=A 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=B 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactingMemStore(205): FLUSHING TO DISK b64ae42323ee772e9f6012e0f6975e26, store=C 2024-12-08T11:22:48,360 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-08T11:22:48,365 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d546ba3e03874960a8fe3681de4bc6c5_b64ae42323ee772e9f6012e0f6975e26 is 50, key is test_row_0/A:col10/1733656961438/Put/seqid=0 2024-12-08T11:22:48,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742530_1706 (size=12454) 2024-12-08T11:22:48,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T11:22:48,770 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-08T11:22:48,773 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241208d546ba3e03874960a8fe3681de4bc6c5_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d546ba3e03874960a8fe3681de4bc6c5_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:48,773 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/bf4da27296c3438f9e0b8358b1b850e3, store: [table=TestAcidGuarantees family=A region=b64ae42323ee772e9f6012e0f6975e26] 2024-12-08T11:22:48,774 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/bf4da27296c3438f9e0b8358b1b850e3 is 175, key is test_row_0/A:col10/1733656961438/Put/seqid=0 2024-12-08T11:22:48,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742531_1707 (size=31255) 2024-12-08T11:22:48,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T11:22:49,177 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/bf4da27296c3438f9e0b8358b1b850e3 2024-12-08T11:22:49,182 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/a1bc8801aaa242d8b62d70eef9413096 is 50, key is test_row_0/B:col10/1733656961438/Put/seqid=0 2024-12-08T11:22:49,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742532_1708 (size=12301) 2024-12-08T11:22:49,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T11:22:49,586 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/a1bc8801aaa242d8b62d70eef9413096 2024-12-08T11:22:49,591 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/04422d0af3a34e16b89a282530ff03a9 is 50, key is test_row_0/C:col10/1733656961438/Put/seqid=0 2024-12-08T11:22:49,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742533_1709 (size=12301) 2024-12-08T11:22:49,994 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=355 (bloomFilter=true), 
to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/04422d0af3a34e16b89a282530ff03a9 2024-12-08T11:22:49,997 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/A/bf4da27296c3438f9e0b8358b1b850e3 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/bf4da27296c3438f9e0b8358b1b850e3 2024-12-08T11:22:50,000 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/bf4da27296c3438f9e0b8358b1b850e3, entries=150, sequenceid=355, filesize=30.5 K 2024-12-08T11:22:50,000 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/B/a1bc8801aaa242d8b62d70eef9413096 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a1bc8801aaa242d8b62d70eef9413096 2024-12-08T11:22:50,003 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a1bc8801aaa242d8b62d70eef9413096, entries=150, sequenceid=355, filesize=12.0 K 2024-12-08T11:22:50,003 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/.tmp/C/04422d0af3a34e16b89a282530ff03a9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/04422d0af3a34e16b89a282530ff03a9 2024-12-08T11:22:50,005 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/04422d0af3a34e16b89a282530ff03a9, entries=150, sequenceid=355, filesize=12.0 K 2024-12-08T11:22:50,006 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for b64ae42323ee772e9f6012e0f6975e26 in 1646ms, sequenceid=355, compaction requested=true 2024-12-08T11:22:50,006 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8b8014742d854d4399e5835bacaa6532, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/9338d68abf7c4a20b0f62e299968d756, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8db62c4bf57e4d3e925a0946431b35f2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/aec3765596ff4c67946aad28fb12cbed, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/318b6e4d7bd34a91998d876b7065bf85, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/37c69920a93346aea0b2db51dfef48ad, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/dcbdf5d3689b481abef0c4dab0cd25eb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/cf6ab20cbbaa4fd389ca33c9f8ad6158, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/df049fa995c14f78a87e54973b2f069e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/17c07391214a462b9e492851978b62cf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0b07ec89594f47a0a272aedc60faf67d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/ca56cffd71d340d69ab942ba44537aae, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0d2842a630344f959aee8d2bb4632474, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/af966c44d20c45539d51d1c2d65f9c4b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/356436eaa2fd4fb0932b4a5db77f5290, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/fb3dffe77e45412190f57921b5aee6d5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/75f1eb9efba1425b975acaeb3cf96bca, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/721f186c9bad4cc5b009762bd16b4e89, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8926c34891984853a9edbd0111cf8701, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/50a3f1c513bd4825aa7320de72faa79b, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/b6b7d902d7214454af216e8580af7668, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/474eddd4a65f43a1910acc985cb9269d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/c917990dd1b44f298eb5149442a8fb11] to archive 2024-12-08T11:22:50,007 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:22:50,008 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8b8014742d854d4399e5835bacaa6532 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8b8014742d854d4399e5835bacaa6532 2024-12-08T11:22:50,009 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/9338d68abf7c4a20b0f62e299968d756 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/9338d68abf7c4a20b0f62e299968d756 2024-12-08T11:22:50,010 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8db62c4bf57e4d3e925a0946431b35f2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8db62c4bf57e4d3e925a0946431b35f2 2024-12-08T11:22:50,010 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/aec3765596ff4c67946aad28fb12cbed to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/aec3765596ff4c67946aad28fb12cbed 2024-12-08T11:22:50,011 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/318b6e4d7bd34a91998d876b7065bf85 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/318b6e4d7bd34a91998d876b7065bf85 2024-12-08T11:22:50,012 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/37c69920a93346aea0b2db51dfef48ad to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/37c69920a93346aea0b2db51dfef48ad 2024-12-08T11:22:50,013 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/dcbdf5d3689b481abef0c4dab0cd25eb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/dcbdf5d3689b481abef0c4dab0cd25eb 2024-12-08T11:22:50,014 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/cf6ab20cbbaa4fd389ca33c9f8ad6158 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/cf6ab20cbbaa4fd389ca33c9f8ad6158 2024-12-08T11:22:50,014 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/df049fa995c14f78a87e54973b2f069e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/df049fa995c14f78a87e54973b2f069e 2024-12-08T11:22:50,015 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/17c07391214a462b9e492851978b62cf to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/17c07391214a462b9e492851978b62cf 2024-12-08T11:22:50,016 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0b07ec89594f47a0a272aedc60faf67d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0b07ec89594f47a0a272aedc60faf67d 2024-12-08T11:22:50,017 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/ca56cffd71d340d69ab942ba44537aae to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/ca56cffd71d340d69ab942ba44537aae 2024-12-08T11:22:50,018 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0d2842a630344f959aee8d2bb4632474 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/0d2842a630344f959aee8d2bb4632474 2024-12-08T11:22:50,018 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/af966c44d20c45539d51d1c2d65f9c4b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/af966c44d20c45539d51d1c2d65f9c4b 2024-12-08T11:22:50,019 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/356436eaa2fd4fb0932b4a5db77f5290 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/356436eaa2fd4fb0932b4a5db77f5290 2024-12-08T11:22:50,020 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/fb3dffe77e45412190f57921b5aee6d5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/fb3dffe77e45412190f57921b5aee6d5 2024-12-08T11:22:50,021 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/75f1eb9efba1425b975acaeb3cf96bca to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/75f1eb9efba1425b975acaeb3cf96bca 2024-12-08T11:22:50,021 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/721f186c9bad4cc5b009762bd16b4e89 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/721f186c9bad4cc5b009762bd16b4e89 2024-12-08T11:22:50,022 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8926c34891984853a9edbd0111cf8701 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/8926c34891984853a9edbd0111cf8701 2024-12-08T11:22:50,023 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/50a3f1c513bd4825aa7320de72faa79b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/50a3f1c513bd4825aa7320de72faa79b 2024-12-08T11:22:50,024 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/b6b7d902d7214454af216e8580af7668 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/b6b7d902d7214454af216e8580af7668 2024-12-08T11:22:50,024 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/474eddd4a65f43a1910acc985cb9269d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/474eddd4a65f43a1910acc985cb9269d 2024-12-08T11:22:50,025 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/c917990dd1b44f298eb5149442a8fb11 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/c917990dd1b44f298eb5149442a8fb11 2024-12-08T11:22:50,026 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9d2b4dac6de647beae89e2e3dad0bfa6, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b289dc20e63a470b90593c400d4450c6, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/0007cfb45c624c4a8ea232f6ac5ac97e, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c89299a030414b9ab7ae362b384d7f59, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/eb2620b2a3034a3088b12424767295d5, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5ad3952e743c4b93aea2b38b19659d9a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5a39e0c4d4354468aed5d6785122d510, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/91c5158e08df4224a629750e5fe01deb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c6aca23b70af4115ac014e457287acd3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b81f5a57d87a423e992577f4309db892, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/14848a7cf4df43479b90e096bf27a11a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b9f43ec59b424acf9dec692dccb3f58a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9019f446fbcb45dd8956fe026e2dee82, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5c8438cd11ef4eca9921138b68435c18, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/705cdb22a7d24b6c9aa81fad1e5c162f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/e42764116141487fb11c6b3cb0f17285, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/ed83d7c325a0476db85bff5e69151389, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b6dc446d97504068b70d7efb3063cb26, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/7c9c903e93294b9ebde34dbd15440e4d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5d658c440e7e4a0aa87b0957a6bc6177, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c528d062a39e499b9506295e50a5589c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a917617daa6448df93c046cef39e0705, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/715a83ca2c264850a8bed65b78472f3c] to archive 2024-12-08T11:22:50,027 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:22:50,028 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9d2b4dac6de647beae89e2e3dad0bfa6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9d2b4dac6de647beae89e2e3dad0bfa6 2024-12-08T11:22:50,028 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b289dc20e63a470b90593c400d4450c6 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b289dc20e63a470b90593c400d4450c6 2024-12-08T11:22:50,029 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/0007cfb45c624c4a8ea232f6ac5ac97e to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/0007cfb45c624c4a8ea232f6ac5ac97e 2024-12-08T11:22:50,030 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c89299a030414b9ab7ae362b384d7f59 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c89299a030414b9ab7ae362b384d7f59 2024-12-08T11:22:50,031 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/eb2620b2a3034a3088b12424767295d5 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/eb2620b2a3034a3088b12424767295d5 2024-12-08T11:22:50,031 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5ad3952e743c4b93aea2b38b19659d9a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5ad3952e743c4b93aea2b38b19659d9a 2024-12-08T11:22:50,032 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5a39e0c4d4354468aed5d6785122d510 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5a39e0c4d4354468aed5d6785122d510 2024-12-08T11:22:50,033 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/91c5158e08df4224a629750e5fe01deb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/91c5158e08df4224a629750e5fe01deb 2024-12-08T11:22:50,034 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c6aca23b70af4115ac014e457287acd3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c6aca23b70af4115ac014e457287acd3 2024-12-08T11:22:50,035 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b81f5a57d87a423e992577f4309db892 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b81f5a57d87a423e992577f4309db892 2024-12-08T11:22:50,036 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/14848a7cf4df43479b90e096bf27a11a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/14848a7cf4df43479b90e096bf27a11a 2024-12-08T11:22:50,037 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b9f43ec59b424acf9dec692dccb3f58a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b9f43ec59b424acf9dec692dccb3f58a 2024-12-08T11:22:50,037 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9019f446fbcb45dd8956fe026e2dee82 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/9019f446fbcb45dd8956fe026e2dee82 2024-12-08T11:22:50,038 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5c8438cd11ef4eca9921138b68435c18 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5c8438cd11ef4eca9921138b68435c18 2024-12-08T11:22:50,039 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/705cdb22a7d24b6c9aa81fad1e5c162f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/705cdb22a7d24b6c9aa81fad1e5c162f 2024-12-08T11:22:50,040 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/e42764116141487fb11c6b3cb0f17285 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/e42764116141487fb11c6b3cb0f17285 2024-12-08T11:22:50,040 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/ed83d7c325a0476db85bff5e69151389 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/ed83d7c325a0476db85bff5e69151389 2024-12-08T11:22:50,041 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b6dc446d97504068b70d7efb3063cb26 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/b6dc446d97504068b70d7efb3063cb26 2024-12-08T11:22:50,042 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/7c9c903e93294b9ebde34dbd15440e4d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/7c9c903e93294b9ebde34dbd15440e4d 2024-12-08T11:22:50,043 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5d658c440e7e4a0aa87b0957a6bc6177 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/5d658c440e7e4a0aa87b0957a6bc6177 2024-12-08T11:22:50,043 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c528d062a39e499b9506295e50a5589c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/c528d062a39e499b9506295e50a5589c 2024-12-08T11:22:50,044 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a917617daa6448df93c046cef39e0705 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a917617daa6448df93c046cef39e0705 2024-12-08T11:22:50,045 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/715a83ca2c264850a8bed65b78472f3c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/715a83ca2c264850a8bed65b78472f3c 2024-12-08T11:22:50,048 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c913a18d0b24232906b51a364b7baaf, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e87440b1ffd0456c80b900cf1f2badaa, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/979bef07575b4a2091d2d6bd458e6cb7, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/31ff27fba10941fd8eca5866c84ce68f, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2bc4e85b2a344dfd9409ded50c25059a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/90894fee9197497ba6f6037f93eda8c8, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/144ea5a4a3db4e868b43b50f269e286c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/d4e5dfb1fa144d71bcdc712e3e5e7c47, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/a4f16b815e9041368be72b26e233b86c, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e4dd3a54c15b4025a8b2f12b6a9a8f79, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2cacfe3828e44d5da87c912ad4f8cf17, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/eaa5e66180ad4127adc1c13569549c51, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/302a3e2e2c044a7b89be8724fd036c52, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/940425bff65847d3ba48c1ab353622eb, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/c5afaf35cf8f4fd5ac25db898db72d33, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/7c38130299f648f7bf3b29ebbade1ba3, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/f064fc2100e845ab910abf0ce2d24978, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2973f9f5665549d786930f9b7c32fd4a, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/ee8f1762cfae4d35ba71295d45c570f4, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/285a8089ec1e4b009b58412cd7fd39a2, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/81fd3de7130840f79367f7cb97ce1d0c, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c86c911bee14d0dbd7e8631a931381d, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/434b7f5998754a8caa628cd4e15dd371] to archive 2024-12-08T11:22:50,049 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-12-08T11:22:50,050 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c913a18d0b24232906b51a364b7baaf to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c913a18d0b24232906b51a364b7baaf 2024-12-08T11:22:50,051 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e87440b1ffd0456c80b900cf1f2badaa to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e87440b1ffd0456c80b900cf1f2badaa 2024-12-08T11:22:50,051 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/979bef07575b4a2091d2d6bd458e6cb7 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/979bef07575b4a2091d2d6bd458e6cb7 2024-12-08T11:22:50,052 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/31ff27fba10941fd8eca5866c84ce68f to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/31ff27fba10941fd8eca5866c84ce68f 2024-12-08T11:22:50,053 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2bc4e85b2a344dfd9409ded50c25059a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2bc4e85b2a344dfd9409ded50c25059a 2024-12-08T11:22:50,054 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/90894fee9197497ba6f6037f93eda8c8 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/90894fee9197497ba6f6037f93eda8c8 2024-12-08T11:22:50,055 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/144ea5a4a3db4e868b43b50f269e286c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/144ea5a4a3db4e868b43b50f269e286c 2024-12-08T11:22:50,056 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/d4e5dfb1fa144d71bcdc712e3e5e7c47 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/d4e5dfb1fa144d71bcdc712e3e5e7c47 2024-12-08T11:22:50,057 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/a4f16b815e9041368be72b26e233b86c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/a4f16b815e9041368be72b26e233b86c 2024-12-08T11:22:50,057 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e4dd3a54c15b4025a8b2f12b6a9a8f79 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e4dd3a54c15b4025a8b2f12b6a9a8f79 2024-12-08T11:22:50,058 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2cacfe3828e44d5da87c912ad4f8cf17 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2cacfe3828e44d5da87c912ad4f8cf17 2024-12-08T11:22:50,059 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/eaa5e66180ad4127adc1c13569549c51 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/eaa5e66180ad4127adc1c13569549c51 2024-12-08T11:22:50,060 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/302a3e2e2c044a7b89be8724fd036c52 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/302a3e2e2c044a7b89be8724fd036c52 2024-12-08T11:22:50,061 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/940425bff65847d3ba48c1ab353622eb to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/940425bff65847d3ba48c1ab353622eb 2024-12-08T11:22:50,062 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/c5afaf35cf8f4fd5ac25db898db72d33 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/c5afaf35cf8f4fd5ac25db898db72d33 2024-12-08T11:22:50,062 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/7c38130299f648f7bf3b29ebbade1ba3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/7c38130299f648f7bf3b29ebbade1ba3 2024-12-08T11:22:50,063 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/f064fc2100e845ab910abf0ce2d24978 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/f064fc2100e845ab910abf0ce2d24978 2024-12-08T11:22:50,064 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2973f9f5665549d786930f9b7c32fd4a to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/2973f9f5665549d786930f9b7c32fd4a 2024-12-08T11:22:50,065 DEBUG 
[StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/ee8f1762cfae4d35ba71295d45c570f4 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/ee8f1762cfae4d35ba71295d45c570f4 2024-12-08T11:22:50,066 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/285a8089ec1e4b009b58412cd7fd39a2 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/285a8089ec1e4b009b58412cd7fd39a2 2024-12-08T11:22:50,067 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/81fd3de7130840f79367f7cb97ce1d0c to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/81fd3de7130840f79367f7cb97ce1d0c 2024-12-08T11:22:50,067 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c86c911bee14d0dbd7e8631a931381d to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/3c86c911bee14d0dbd7e8631a931381d 2024-12-08T11:22:50,068 DEBUG [StoreCloser-TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/434b7f5998754a8caa628cd4e15dd371 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/434b7f5998754a8caa628cd4e15dd371 2024-12-08T11:22:50,071 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/recovered.edits/358.seqid, newMaxSeqId=358, maxSeqId=4 2024-12-08T11:22:50,071 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26. 
2024-12-08T11:22:50,071 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] regionserver.HRegion(1635): Region close journal for b64ae42323ee772e9f6012e0f6975e26: 2024-12-08T11:22:50,073 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION, pid=186}] handler.UnassignRegionHandler(170): Closed b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,073 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=185 updating hbase:meta row=b64ae42323ee772e9f6012e0f6975e26, regionState=CLOSED 2024-12-08T11:22:50,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-12-08T11:22:50,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; CloseRegionProcedure b64ae42323ee772e9f6012e0f6975e26, server=355ef6e50110,46083,1733656795491 in 1.8660 sec 2024-12-08T11:22:50,075 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=185, resume processing ppid=184 2024-12-08T11:22:50,075 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, ppid=184, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=b64ae42323ee772e9f6012e0f6975e26, UNASSIGN in 1.8670 sec 2024-12-08T11:22:50,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-12-08T11:22:50,077 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8700 sec 2024-12-08T11:22:50,077 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733656970077"}]},"ts":"1733656970077"} 2024-12-08T11:22:50,078 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-08T11:22:50,080 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-08T11:22:50,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8790 sec 2024-12-08T11:22:50,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-12-08T11:22:50,306 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-12-08T11:22:50,307 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-08T11:22:50,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] procedure2.ProcedureExecutor(1098): Stored pid=187, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:50,308 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=187, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:50,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-08T11:22:50,308 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=187, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:50,310 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,312 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C, FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/recovered.edits] 2024-12-08T11:22:50,314 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/12259f48168d4670b0522216244447c9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/12259f48168d4670b0522216244447c9 2024-12-08T11:22:50,314 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/4f4d83ca58be4fdebf3e9290204426ef to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/4f4d83ca58be4fdebf3e9290204426ef 2024-12-08T11:22:50,315 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/676175e4976c4568acba87da08aefbcd to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/676175e4976c4568acba87da08aefbcd 2024-12-08T11:22:50,316 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/bf4da27296c3438f9e0b8358b1b850e3 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/A/bf4da27296c3438f9e0b8358b1b850e3 2024-12-08T11:22:50,317 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/8d40b1e5299841a2b2f89f07fd88f7c9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/8d40b1e5299841a2b2f89f07fd88f7c9 
2024-12-08T11:22:50,318 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a1bc8801aaa242d8b62d70eef9413096 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/a1bc8801aaa242d8b62d70eef9413096 2024-12-08T11:22:50,319 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/bbe116f67f8e49f586bb65e6c390836b to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/bbe116f67f8e49f586bb65e6c390836b 2024-12-08T11:22:50,320 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/d8ccf21dfe83423592107cdbb17786aa to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/B/d8ccf21dfe83423592107cdbb17786aa 2024-12-08T11:22:50,322 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/04422d0af3a34e16b89a282530ff03a9 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/04422d0af3a34e16b89a282530ff03a9 2024-12-08T11:22:50,323 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/a1200941c75a41589e53ef073797aafe to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/a1200941c75a41589e53ef073797aafe 2024-12-08T11:22:50,324 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e7745cfc63814740b5a883db38695bc0 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e7745cfc63814740b5a883db38695bc0 2024-12-08T11:22:50,324 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e85fd1fbd74249248efe86d4bc9848dd to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/C/e85fd1fbd74249248efe86d4bc9848dd 2024-12-08T11:22:50,326 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/recovered.edits/358.seqid to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26/recovered.edits/358.seqid 2024-12-08T11:22:50,327 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/default/TestAcidGuarantees/b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,327 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-08T11:22:50,327 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T11:22:50,328 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-08T11:22:50,330 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208002d4142894147f38260abc035c1bcd7_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208002d4142894147f38260abc035c1bcd7_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,331 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120803a6d437899048b686ed45b2602d29e4_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120803a6d437899048b686ed45b2602d29e4_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,332 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208063d7727374b4d9e803ffe0b78d8a842_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208063d7727374b4d9e803ffe0b78d8a842_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,333 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120821039d38be1247e1b45aae4a129bafe1_b64ae42323ee772e9f6012e0f6975e26 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120821039d38be1247e1b45aae4a129bafe1_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,334 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082c852e2ccb884a068d9b709e5c85530a_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412082c852e2ccb884a068d9b709e5c85530a_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,334 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083ed52982dd3b45099af50af35061ce3b_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412083ed52982dd3b45099af50af35061ce3b_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,335 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120840ed08ef1081497c970d56a2c240ac20_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120840ed08ef1081497c970d56a2c240ac20_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,336 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208464b15db921548e6b2b7f1e1a01c8100_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208464b15db921548e6b2b7f1e1a01c8100_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,337 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120846cc1cae251c46dfae6a0898017e5c3a_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120846cc1cae251c46dfae6a0898017e5c3a_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,338 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085bd5d62bab6c469cb9614f890a1d44ca_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085bd5d62bab6c469cb9614f890a1d44ca_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,339 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085e1ad6eb663442b29b3e902b50e1905a_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412085e1ad6eb663442b29b3e902b50e1905a_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,339 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120885ac977533ef4f0491418d413319591d_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024120885ac977533ef4f0491418d413319591d_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,340 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208888e37493ece40929253c4960ba915b0_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208888e37493ece40929253c4960ba915b0_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,341 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089af8cc10bc02492499997b5a839925a2_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412089af8cc10bc02492499997b5a839925a2_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,342 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b02a2ae4143640bbb1ca8180fdc52967_b64ae42323ee772e9f6012e0f6975e26 to 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208b02a2ae4143640bbb1ca8180fdc52967_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,342 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208bc2fc2317b2c4c77a4253fc65e499204_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208bc2fc2317b2c4c77a4253fc65e499204_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,343 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c5be5feca38a467f86b1161d11efe085_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208c5be5feca38a467f86b1161d11efe085_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,344 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d00a70d52f1a43db84e2ad2315db5b9b_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d00a70d52f1a43db84e2ad2315db5b9b_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,345 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d546ba3e03874960a8fe3681de4bc6c5_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208d546ba3e03874960a8fe3681de4bc6c5_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,345 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ecf8a256501b4d61a86be9c2b0a14410_b64ae42323ee772e9f6012e0f6975e26 to hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241208ecf8a256501b4d61a86be9c2b0a14410_b64ae42323ee772e9f6012e0f6975e26 2024-12-08T11:22:50,346 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-08T11:22:50,348 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=187, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:50,349 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-08T11:22:50,351 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-12-08T11:22:50,352 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=187, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:50,352 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-12-08T11:22:50,352 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733656970352"}]},"ts":"9223372036854775807"} 2024-12-08T11:22:50,353 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-08T11:22:50,353 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b64ae42323ee772e9f6012e0f6975e26, NAME => 'TestAcidGuarantees,,1733656936437.b64ae42323ee772e9f6012e0f6975e26.', STARTKEY => '', ENDKEY => ''}] 2024-12-08T11:22:50,353 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-12-08T11:22:50,353 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733656970353"}]},"ts":"9223372036854775807"} 2024-12-08T11:22:50,354 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-08T11:22:50,356 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=187, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-08T11:22:50,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 49 msec 2024-12-08T11:22:50,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43409 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=187 2024-12-08T11:22:50,409 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 187 completed 2024-12-08T11:22:50,420 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=238 (was 241), OpenFileDescriptor=449 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=422 (was 501), ProcessCount=9 (was 9), AvailableMemoryMB=7443 (was 7471) 2024-12-08T11:22:50,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-08T11:22:50,420 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-08T11:22:50,420 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x38630296 to 127.0.0.1:63801 2024-12-08T11:22:50,420 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:50,420 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-08T11:22:50,420 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=217906132, stopped=false 2024-12-08T11:22:50,421 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=355ef6e50110,43409,1733656794727 2024-12-08T11:22:50,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T11:22:50,422 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-08T11:22:50,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:22:50,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-08T11:22:50,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:22:50,423 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:50,423 INFO [Time-limited test {}] 
regionserver.HRegionServer(2561): ***** STOPPING region server '355ef6e50110,46083,1733656795491' ***** 2024-12-08T11:22:50,423 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T11:22:50,423 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-08T11:22:50,423 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-08T11:22:50,423 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-08T11:22:50,424 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-08T11:22:50,424 INFO [RS:0;355ef6e50110:46083 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-08T11:22:50,424 INFO [RS:0;355ef6e50110:46083 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-08T11:22:50,424 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(3579): Received CLOSE for 9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:22:50,424 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1224): stopping server 355ef6e50110,46083,1733656795491 2024-12-08T11:22:50,424 DEBUG [RS:0;355ef6e50110:46083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:50,424 INFO [RS:0;355ef6e50110:46083 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-08T11:22:50,424 INFO [RS:0;355ef6e50110:46083 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-08T11:22:50,424 INFO [RS:0;355ef6e50110:46083 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-08T11:22:50,424 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-08T11:22:50,425 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 9ba0a86166fe12d211f42156054bda9c, disabling compactions & flushes 2024-12-08T11:22:50,425 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 2024-12-08T11:22:50,425 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 2024-12-08T11:22:50,425 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. after waiting 0 ms 2024-12-08T11:22:50,425 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 
2024-12-08T11:22:50,425 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 9ba0a86166fe12d211f42156054bda9c 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-08T11:22:50,425 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-08T11:22:50,425 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1603): Online Regions={9ba0a86166fe12d211f42156054bda9c=hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c., 1588230740=hbase:meta,,1.1588230740} 2024-12-08T11:22:50,425 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-08T11:22:50,425 INFO [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-08T11:22:50,425 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-08T11:22:50,425 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-08T11:22:50,425 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-08T11:22:50,425 INFO [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-08T11:22:50,425 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:22:50,441 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/namespace/9ba0a86166fe12d211f42156054bda9c/.tmp/info/2400255456ab4e8daa3965f03e83edc5 is 45, key is default/info:d/1733656799906/Put/seqid=0 2024-12-08T11:22:50,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742534_1710 (size=5037) 2024-12-08T11:22:50,448 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/.tmp/info/0457708037f64544bfd02f4e671c676f is 143, key is hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c./info:regioninfo/1733656799791/Put/seqid=0 2024-12-08T11:22:50,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742535_1711 (size=7725) 2024-12-08T11:22:50,452 INFO [regionserver/355ef6e50110:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T11:22:50,626 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:22:50,752 INFO [regionserver/355ef6e50110:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-08T11:22:50,752 INFO 
[regionserver/355ef6e50110:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-08T11:22:50,826 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 9ba0a86166fe12d211f42156054bda9c 2024-12-08T11:22:50,844 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/namespace/9ba0a86166fe12d211f42156054bda9c/.tmp/info/2400255456ab4e8daa3965f03e83edc5 2024-12-08T11:22:50,847 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/namespace/9ba0a86166fe12d211f42156054bda9c/.tmp/info/2400255456ab4e8daa3965f03e83edc5 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/namespace/9ba0a86166fe12d211f42156054bda9c/info/2400255456ab4e8daa3965f03e83edc5 2024-12-08T11:22:50,850 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/namespace/9ba0a86166fe12d211f42156054bda9c/info/2400255456ab4e8daa3965f03e83edc5, entries=2, sequenceid=6, filesize=4.9 K 2024-12-08T11:22:50,850 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 9ba0a86166fe12d211f42156054bda9c in 425ms, sequenceid=6, compaction requested=false 2024-12-08T11:22:50,851 INFO [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/.tmp/info/0457708037f64544bfd02f4e671c676f 2024-12-08T11:22:50,853 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/namespace/9ba0a86166fe12d211f42156054bda9c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-08T11:22:50,854 INFO [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 2024-12-08T11:22:50,854 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 9ba0a86166fe12d211f42156054bda9c: 2024-12-08T11:22:50,854 DEBUG [RS_CLOSE_REGION-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733656798553.9ba0a86166fe12d211f42156054bda9c. 
2024-12-08T11:22:50,868 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/.tmp/rep_barrier/5b85af6efe564335a48c486ac34e35f9 is 102, key is TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3./rep_barrier:/1733656825121/DeleteFamily/seqid=0 2024-12-08T11:22:50,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742536_1712 (size=6025) 2024-12-08T11:22:51,026 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-08T11:22:51,226 DEBUG [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-08T11:22:51,272 INFO [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/.tmp/rep_barrier/5b85af6efe564335a48c486ac34e35f9 2024-12-08T11:22:51,289 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/.tmp/table/ca4a0ab157ce4c47b27946d594be0eec is 96, key is TestAcidGuarantees,,1733656800165.64a616eb95ce0fb49283d502a9d694a3./table:/1733656825121/DeleteFamily/seqid=0 2024-12-08T11:22:51,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742537_1713 (size=5942) 2024-12-08T11:22:51,292 INFO [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/.tmp/table/ca4a0ab157ce4c47b27946d594be0eec 2024-12-08T11:22:51,295 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/.tmp/info/0457708037f64544bfd02f4e671c676f as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/info/0457708037f64544bfd02f4e671c676f 2024-12-08T11:22:51,297 INFO [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/info/0457708037f64544bfd02f4e671c676f, entries=22, sequenceid=93, filesize=7.5 K 2024-12-08T11:22:51,298 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/.tmp/rep_barrier/5b85af6efe564335a48c486ac34e35f9 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/rep_barrier/5b85af6efe564335a48c486ac34e35f9 2024-12-08T11:22:51,300 INFO 
[RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/rep_barrier/5b85af6efe564335a48c486ac34e35f9, entries=6, sequenceid=93, filesize=5.9 K 2024-12-08T11:22:51,300 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/.tmp/table/ca4a0ab157ce4c47b27946d594be0eec as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/table/ca4a0ab157ce4c47b27946d594be0eec 2024-12-08T11:22:51,302 INFO [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/table/ca4a0ab157ce4c47b27946d594be0eec, entries=9, sequenceid=93, filesize=5.8 K 2024-12-08T11:22:51,303 INFO [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 878ms, sequenceid=93, compaction requested=false 2024-12-08T11:22:51,306 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-08T11:22:51,306 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-08T11:22:51,306 INFO [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-08T11:22:51,306 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-08T11:22:51,306 DEBUG [RS_CLOSE_META-regionserver/355ef6e50110:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-08T11:22:51,427 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1250): stopping server 355ef6e50110,46083,1733656795491; all regions closed. 
2024-12-08T11:22:51,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741834_1010 (size=26050) 2024-12-08T11:22:51,433 DEBUG [RS:0;355ef6e50110:46083 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/oldWALs 2024-12-08T11:22:51,433 INFO [RS:0;355ef6e50110:46083 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 355ef6e50110%2C46083%2C1733656795491.meta:.meta(num 1733656798305) 2024-12-08T11:22:51,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741833_1009 (size=16376826) 2024-12-08T11:22:51,437 DEBUG [RS:0;355ef6e50110:46083 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/oldWALs 2024-12-08T11:22:51,437 INFO [RS:0;355ef6e50110:46083 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 355ef6e50110%2C46083%2C1733656795491:(num 1733656797876) 2024-12-08T11:22:51,437 DEBUG [RS:0;355ef6e50110:46083 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:51,437 INFO [RS:0;355ef6e50110:46083 {}] regionserver.LeaseManager(133): Closed leases 2024-12-08T11:22:51,437 INFO [RS:0;355ef6e50110:46083 {}] hbase.ChoreService(370): Chore service for: regionserver/355ef6e50110:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-08T11:22:51,437 INFO [regionserver/355ef6e50110:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-08T11:22:51,438 INFO [RS:0;355ef6e50110:46083 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:46083 2024-12-08T11:22:51,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/355ef6e50110,46083,1733656795491 2024-12-08T11:22:51,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-08T11:22:51,443 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [355ef6e50110,46083,1733656795491] 2024-12-08T11:22:51,443 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 355ef6e50110,46083,1733656795491; numProcessing=1 2024-12-08T11:22:51,444 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/355ef6e50110,46083,1733656795491 already deleted, retry=false 2024-12-08T11:22:51,444 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 355ef6e50110,46083,1733656795491 expired; onlineServers=0 2024-12-08T11:22:51,445 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '355ef6e50110,43409,1733656794727' ***** 2024-12-08T11:22:51,445 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-08T11:22:51,445 DEBUG [M:0;355ef6e50110:43409 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34d91da8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=355ef6e50110/172.17.0.2:0 2024-12-08T11:22:51,445 INFO [M:0;355ef6e50110:43409 {}] regionserver.HRegionServer(1224): stopping server 355ef6e50110,43409,1733656794727 2024-12-08T11:22:51,445 INFO [M:0;355ef6e50110:43409 {}] regionserver.HRegionServer(1250): stopping server 355ef6e50110,43409,1733656794727; all regions closed. 2024-12-08T11:22:51,445 DEBUG [M:0;355ef6e50110:43409 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-08T11:22:51,445 DEBUG [M:0;355ef6e50110:43409 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-08T11:22:51,445 DEBUG [M:0;355ef6e50110:43409 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-08T11:22:51,445 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-08T11:22:51,445 DEBUG [master/355ef6e50110:0:becomeActiveMaster-HFileCleaner.large.0-1733656797598 {}] cleaner.HFileCleaner(306): Exit Thread[master/355ef6e50110:0:becomeActiveMaster-HFileCleaner.large.0-1733656797598,5,FailOnTimeoutGroup] 2024-12-08T11:22:51,445 INFO [M:0;355ef6e50110:43409 {}] hbase.ChoreService(370): Chore service for: master/355ef6e50110:0 had [] on shutdown 2024-12-08T11:22:51,445 DEBUG [M:0;355ef6e50110:43409 {}] master.HMaster(1733): Stopping service threads 2024-12-08T11:22:51,446 INFO [M:0;355ef6e50110:43409 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-08T11:22:51,446 ERROR [M:0;355ef6e50110:43409 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:36759 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:36759,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-12-08T11:22:51,446 DEBUG [master/355ef6e50110:0:becomeActiveMaster-HFileCleaner.small.0-1733656797599 {}] cleaner.HFileCleaner(306): Exit Thread[master/355ef6e50110:0:becomeActiveMaster-HFileCleaner.small.0-1733656797599,5,FailOnTimeoutGroup] 2024-12-08T11:22:51,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-08T11:22:51,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-08T11:22:51,447 INFO [M:0;355ef6e50110:43409 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-08T11:22:51,447 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-08T11:22:51,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-08T11:22:51,447 DEBUG [M:0;355ef6e50110:43409 {}] zookeeper.ZKUtil(347): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-08T11:22:51,447 WARN [M:0;355ef6e50110:43409 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-08T11:22:51,447 INFO [M:0;355ef6e50110:43409 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-08T11:22:51,447 INFO [M:0;355ef6e50110:43409 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-08T11:22:51,447 DEBUG [M:0;355ef6e50110:43409 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-08T11:22:51,447 INFO [M:0;355ef6e50110:43409 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T11:22:51,447 DEBUG [M:0;355ef6e50110:43409 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T11:22:51,447 DEBUG [M:0;355ef6e50110:43409 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-08T11:22:51,448 DEBUG [M:0;355ef6e50110:43409 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T11:22:51,448 INFO [M:0;355ef6e50110:43409 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=817.98 KB heapSize=1009.06 KB 2024-12-08T11:22:51,463 DEBUG [M:0;355ef6e50110:43409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/957b234547314608be0c5942d0a81917 is 82, key is hbase:meta,,1/info:regioninfo/1733656798443/Put/seqid=0 2024-12-08T11:22:51,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742538_1714 (size=5672) 2024-12-08T11:22:51,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T11:22:51,543 INFO [RS:0;355ef6e50110:46083 {}] regionserver.HRegionServer(1307): Exiting; stopping=355ef6e50110,46083,1733656795491; zookeeper connection closed. 
2024-12-08T11:22:51,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46083-0x100715b0c0a0001, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T11:22:51,544 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@27b63bb1 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@27b63bb1 2024-12-08T11:22:51,544 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-08T11:22:51,868 INFO [M:0;355ef6e50110:43409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2366 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/957b234547314608be0c5942d0a81917 2024-12-08T11:22:51,888 DEBUG [M:0;355ef6e50110:43409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5abd1f555c934558a590bda3dc1f7458 is 2283, key is \x00\x00\x00\x00\x00\x00\x00\xA2/proc:d/1733656939453/Put/seqid=0 2024-12-08T11:22:51,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742539_1715 (size=46443) 2024-12-08T11:22:52,292 INFO [M:0;355ef6e50110:43409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=817.42 KB at sequenceid=2366 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5abd1f555c934558a590bda3dc1f7458 2024-12-08T11:22:52,295 INFO [M:0;355ef6e50110:43409 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5abd1f555c934558a590bda3dc1f7458 2024-12-08T11:22:52,310 DEBUG [M:0;355ef6e50110:43409 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/504e8698b5bf45538cafe07933338873 is 69, key is 355ef6e50110,46083,1733656795491/rs:state/1733656797642/Put/seqid=0 2024-12-08T11:22:52,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073742540_1716 (size=5156) 2024-12-08T11:22:52,713 INFO [M:0;355ef6e50110:43409 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2366 (bloomFilter=true), to=hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/504e8698b5bf45538cafe07933338873 2024-12-08T11:22:52,716 DEBUG [M:0;355ef6e50110:43409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/957b234547314608be0c5942d0a81917 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/957b234547314608be0c5942d0a81917 2024-12-08T11:22:52,718 INFO [M:0;355ef6e50110:43409 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/957b234547314608be0c5942d0a81917, entries=8, sequenceid=2366, filesize=5.5 K 2024-12-08T11:22:52,719 DEBUG [M:0;355ef6e50110:43409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5abd1f555c934558a590bda3dc1f7458 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5abd1f555c934558a590bda3dc1f7458 2024-12-08T11:22:52,721 INFO [M:0;355ef6e50110:43409 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5abd1f555c934558a590bda3dc1f7458 2024-12-08T11:22:52,721 INFO [M:0;355ef6e50110:43409 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5abd1f555c934558a590bda3dc1f7458, entries=187, sequenceid=2366, filesize=45.4 K 2024-12-08T11:22:52,721 DEBUG [M:0;355ef6e50110:43409 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/504e8698b5bf45538cafe07933338873 as hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/504e8698b5bf45538cafe07933338873 2024-12-08T11:22:52,723 INFO [M:0;355ef6e50110:43409 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:36759/user/jenkins/test-data/c727ffc4-f3df-7013-aedb-6bce51fd290c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/504e8698b5bf45538cafe07933338873, entries=1, sequenceid=2366, filesize=5.0 K 2024-12-08T11:22:52,724 INFO [M:0;355ef6e50110:43409 {}] regionserver.HRegion(3040): Finished flush of dataSize ~817.98 KB/837607, heapSize ~1008.77 KB/1032976, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1276ms, sequenceid=2366, compaction requested=false 2024-12-08T11:22:52,725 INFO [M:0;355ef6e50110:43409 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-08T11:22:52,725 DEBUG [M:0;355ef6e50110:43409 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-08T11:22:52,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42827 is added to blk_1073741830_1006 (size=992668) 2024-12-08T11:22:52,728 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-08T11:22:52,728 INFO [M:0;355ef6e50110:43409 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 
2024-12-08T11:22:52,728 INFO [M:0;355ef6e50110:43409 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:43409 2024-12-08T11:22:52,729 DEBUG [M:0;355ef6e50110:43409 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/355ef6e50110,43409,1733656794727 already deleted, retry=false 2024-12-08T11:22:52,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T11:22:52,831 INFO [M:0;355ef6e50110:43409 {}] regionserver.HRegionServer(1307): Exiting; stopping=355ef6e50110,43409,1733656794727; zookeeper connection closed. 2024-12-08T11:22:52,831 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:43409-0x100715b0c0a0000, quorum=127.0.0.1:63801, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-08T11:22:52,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-08T11:22:52,839 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T11:22:52,839 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T11:22:52,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T11:22:52,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/hadoop.log.dir/,STOPPED} 2024-12-08T11:22:52,842 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-08T11:22:52,842 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-08T11:22:52,843 WARN [BP-1692739221-172.17.0.2-1733656791850 heartbeating to localhost/127.0.0.1:36759 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-08T11:22:52,843 WARN [BP-1692739221-172.17.0.2-1733656791850 heartbeating to localhost/127.0.0.1:36759 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1692739221-172.17.0.2-1733656791850 (Datanode Uuid 5d6ecd1b-00f9-49e2-800a-270a73fe2319) service to localhost/127.0.0.1:36759 2024-12-08T11:22:52,845 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9/dfs/data/data1/current/BP-1692739221-172.17.0.2-1733656791850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T11:22:52,845 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/cluster_604f4e65-1846-6ba6-9284-e762fd155be9/dfs/data/data2/current/BP-1692739221-172.17.0.2-1733656791850 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-08T11:22:52,846 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-08T11:22:52,854 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-08T11:22:52,854 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-08T11:22:52,854 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-08T11:22:52,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-08T11:22:52,855 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/307c4604-37d1-f8f2-e1db-53a87a69d7ae/hadoop.log.dir/,STOPPED} 2024-12-08T11:22:52,874 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-08T11:22:53,024 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down